Commit e71f7103 authored by David S. Miller

Merge nuts.davemloft.net:/disk1/BK/network-2.6

into nuts.davemloft.net:/disk1/BK/net-2.6
parents 99202312 bb9a9e6e
......@@ -110,8 +110,8 @@
#endif
extern const struct atmdev_ops fore200e_ops;
extern const struct fore200e_bus fore200e_bus[];
static const struct atmdev_ops fore200e_ops;
static const struct fore200e_bus fore200e_bus[];
static struct fore200e* fore200e_boards = NULL;
......
......@@ -68,8 +68,6 @@
#define IF_IADBG_SUNI_STAT 0x02000000 // suni statistics
#define IF_IADBG_RESET 0x04000000
extern unsigned int IADebugFlag;
#define IF_IADBG(f) if (IADebugFlag & (f))
#ifdef CONFIG_ATM_IA_DEBUG /* Debug build */
......
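For reference, a hedged sketch of how the flag-gated debug macro above is typically used; the message text is made up, not from the driver:

/* Print only when the SUNI-statistics debug bit is set in IADebugFlag. */
IF_IADBG(IF_IADBG_SUNI_STAT)
	printk(KERN_DEBUG "ia: dumping SUNI statistics\n");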
......@@ -79,6 +79,8 @@
#define PPPOE_HASH_BITS 4
#define PPPOE_HASH_SIZE (1<<PPPOE_HASH_BITS)
static struct ppp_channel_ops pppoe_chan_ops;
static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
......
......@@ -52,7 +52,6 @@ extern int arlan_debug;
extern int arlan_entry_debug;
extern int arlan_exit_debug;
extern int testMemory;
extern const char* arlan_version;
extern int arlan_command(struct net_device * dev, int command);
#define SIDUNKNOWN -1
......
......@@ -677,6 +677,24 @@ static inline void hlist_add_after(struct hlist_node *n,
pos && ({ n = pos->next; 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
/**
* hlist_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop counter.
* @pos: the &struct hlist_node to use as a loop counter.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
for (pos = (head)->first; \
pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next, ({ smp_read_barrier_depends(); 0; }) )
#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */
......
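To make the contract concrete, a minimal sketch of a lookup built on the new iterator; the struct, hash table, and function names are hypothetical, not from this patch. As the comment above requires, the traversal must sit inside an rcu_read_lock() section, and the returned entry is only valid while that lock is held (compare __br_fdb_get() later in this commit):

struct my_entry {
	struct hlist_node node;
	int key;
};

static struct hlist_head my_hash[16];

/* Caller must hold rcu_read_lock(); the entry may be freed after unlock. */
static struct my_entry *my_lookup(int key)
{
	struct my_entry *e;
	struct hlist_node *pos;

	hlist_for_each_entry_rcu(e, pos, &my_hash[key & 15], node) {
		if (e->key == key)
			return e;
	}
	return NULL;
}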
......@@ -117,8 +117,6 @@ enum
struct tc_police
{
__u32 index;
int refcnt;
int bindcnt;
int action;
#define TC_POLICE_UNSPEC TC_ACT_UNSPEC
#define TC_POLICE_OK TC_ACT_OK
......@@ -131,6 +129,9 @@ struct tc_police
__u32 mtu;
struct tc_ratespec rate;
struct tc_ratespec peakrate;
int refcnt;
int bindcnt;
__u32 capab;
};
struct tcf_t
......@@ -188,6 +189,7 @@ enum
TCA_U32_POLICE,
TCA_U32_ACT,
TCA_U32_INDEV,
TCA_U32_PCNT,
__TCA_U32_MAX
};
......@@ -199,7 +201,6 @@ struct tc_u32_key
__u32 val;
int off;
int offmask;
__u32 kcnt;
};
struct tc_u32_sel
......@@ -215,10 +216,16 @@ struct tc_u32_sel
short hoff;
__u32 hmask;
struct tc_u32_key keys[0];
unsigned long rcnt;
unsigned long rhit;
};
#ifdef CONFIG_CLS_U32_PERF
struct tc_u32_pcnt
{
__u64 rcnt;
__u64 rhit;
__u64 kcnts[0];
};
#endif
/* Flags */
#define TC_U32_TERMINAL 1
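Because kcnts[] is a zero-length trailing array, each filter has to allocate one __u64 per key behind the fixed part. This sketch mirrors the allocation u32_change() performs later in this commit; nkeys stands in for sel->nkeys:

struct tc_u32_pcnt *pf;
size_t sz = sizeof(struct tc_u32_pcnt) + nkeys * sizeof(__u64);

pf = kmalloc(sz, GFP_KERNEL);	/* room for rcnt, rhit and nkeys counters */
if (pf == NULL)
	return -ENOBUFS;
memset(pf, 0, sz);		/* kcnts[i] pairs with sel.keys[i] */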
......@@ -283,8 +290,8 @@ enum
TCA_FW_UNSPEC,
TCA_FW_CLASSID,
TCA_FW_POLICE,
TCA_FW_INDEV,
TCA_FW_ACT,
TCA_FW_INDEV, /* used by CONFIG_NET_CLS_IND */
TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */
__TCA_FW_MAX
};
......
......@@ -233,7 +233,7 @@ struct sk_buff {
* want to keep them across layers you have to do a skb_clone()
* first. This is owned by whoever has the skb queued ATM.
*/
char cb[48];
char cb[40];
unsigned int len,
data_len,
......
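The control buffer shrinks from 48 to 40 bytes here, so every layer that overlays a private struct on skb->cb must still fit. The usual overlay pattern, with a hypothetical struct for illustration:

/* Hypothetical per-layer private data kept in skb->cb; with this change
 * it must fit in 40 bytes, and it is lost across layers unless the skb
 * is cloned first, as the comment above notes. */
struct my_skb_cb {
	u32 seq;
	u8 flags;
};

#define MY_SKB_CB(skb) ((struct my_skb_cb *)&((skb)->cb[0]))

/* usage: MY_SKB_CB(skb)->seq = 1; */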
......@@ -210,6 +210,4 @@ static inline int irttp_is_primary(struct tsap_cb *self)
return(irlap_is_primary(self->lsap->lap->irlap));
}
extern struct irttp_cb *irttp;
#endif /* IRTTP_H */
......@@ -112,9 +112,6 @@ struct nr_node {
* nr_node & nr_neigh lists, refcounting and locking
*********************************************************************/
extern struct hlist_head nr_node_list;
extern struct hlist_head nr_neigh_list;
#define nr_node_hold(__nr_node) \
atomic_inc(&((__nr_node)->refcount))
......
......@@ -829,6 +829,7 @@ extern int xfrm6_tunnel_check_size(struct sk_buff *skb);
extern u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
extern u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
extern int xfrm6_output(struct sk_buff **pskb);
#ifdef CONFIG_XFRM
extern int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type);
......
......@@ -109,6 +109,8 @@ source "net/ipv4/Kconfig"
config IPV6
tristate "The IPv6 protocol (EXPERIMENTAL)"
depends on INET && EXPERIMENTAL
select CRYPTO if IPV6_PRIVACY
select CRYPTO_MD5 if IPV6_PRIVACY
---help---
This is experimental support for the IP version 6 (formerly called
IPng "IP next generation"). You will still be able to do
......
......@@ -28,43 +28,28 @@ static struct net_device_stats *br_dev_get_stats(struct net_device *dev)
return &br->statistics;
}
static int __br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_bridge *br;
unsigned char *dest;
struct net_bridge *br = netdev_priv(dev);
const unsigned char *dest = skb->data;
struct net_bridge_fdb_entry *dst;
br = dev->priv;
br->statistics.tx_packets++;
br->statistics.tx_bytes += skb->len;
dest = skb->mac.raw = skb->data;
skb->mac.raw = skb->data;
skb_pull(skb, ETH_HLEN);
if (dest[0] & 1) {
rcu_read_lock();
if (dest[0] & 1)
br_flood_deliver(br, skb, 0);
return 0;
}
if ((dst = br_fdb_get(br, dest)) != NULL) {
else if ((dst = __br_fdb_get(br, dest)) != NULL)
br_deliver(dst->dst, skb);
br_fdb_put(dst);
return 0;
}
br_flood_deliver(br, skb, 0);
return 0;
}
int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
int ret;
else
br_flood_deliver(br, skb, 0);
rcu_read_lock();
ret = __br_dev_xmit(skb, dev);
rcu_read_unlock();
return ret;
return 0;
}
static int br_dev_open(struct net_device *dev)
......
......@@ -73,7 +73,7 @@ static __inline__ int br_mac_hash(const unsigned char *mac)
static __inline__ void fdb_delete(struct net_bridge_fdb_entry *f)
{
hlist_del(&f->hlist);
hlist_del_rcu(&f->hlist);
if (!f->is_static)
list_del(&f->age_list);
......@@ -85,7 +85,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
struct net_bridge *br = p->br;
int i;
write_lock_bh(&br->hash_lock);
spin_lock_bh(&br->hash_lock);
/* Search all chains since old address/hash is unknown */
for (i = 0; i < BR_HASH_SIZE; i++) {
......@@ -117,7 +117,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
fdb_insert(br, p, newaddr, 1);
write_unlock_bh(&br->hash_lock);
spin_unlock_bh(&br->hash_lock);
}
void br_fdb_cleanup(unsigned long _data)
......@@ -126,7 +126,7 @@ void br_fdb_cleanup(unsigned long _data)
struct list_head *l, *n;
unsigned long delay;
write_lock_bh(&br->hash_lock);
spin_lock_bh(&br->hash_lock);
delay = hold_time(br);
list_for_each_safe(l, n, &br->age_list) {
......@@ -144,14 +144,14 @@ void br_fdb_cleanup(unsigned long _data)
break;
}
}
write_unlock_bh(&br->hash_lock);
spin_unlock_bh(&br->hash_lock);
}
void br_fdb_delete_by_port(struct net_bridge *br, struct net_bridge_port *p)
{
int i;
write_lock_bh(&br->hash_lock);
spin_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct hlist_node *h, *g;
......@@ -182,37 +182,53 @@ void br_fdb_delete_by_port(struct net_bridge *br, struct net_bridge_port *p)
skip_delete: ;
}
}
write_unlock_bh(&br->hash_lock);
spin_unlock_bh(&br->hash_lock);
}
struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br, unsigned char *addr)
/* No locking or refcounting; assumes the caller holds rcu_read_lock() */
struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
const unsigned char *addr)
{
struct hlist_node *h;
struct net_bridge_fdb_entry *fdb;
read_lock_bh(&br->hash_lock);
hlist_for_each(h, &br->hash[br_mac_hash(addr)]) {
struct net_bridge_fdb_entry *fdb
= hlist_entry(h, struct net_bridge_fdb_entry, hlist);
hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) {
if (!memcmp(fdb->addr.addr, addr, ETH_ALEN)) {
if (has_expired(br, fdb))
goto ret_null;
atomic_inc(&fdb->use_count);
read_unlock_bh(&br->hash_lock);
if (unlikely(has_expired(br, fdb)))
break;
return fdb;
}
}
ret_null:
read_unlock_bh(&br->hash_lock);
return NULL;
}
/* Interface used by ATM hook that keeps a ref count */
struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
unsigned char *addr)
{
struct net_bridge_fdb_entry *fdb;
rcu_read_lock();
fdb = __br_fdb_get(br, addr);
if (fdb)
atomic_inc(&fdb->use_count);
rcu_read_unlock();
return fdb;
}
static void fdb_rcu_free(struct rcu_head *head)
{
struct net_bridge_fdb_entry *ent
= container_of(head, struct net_bridge_fdb_entry, rcu);
kmem_cache_free(br_fdb_cache, ent);
}
/* Set entry up for deletion with RCU */
void br_fdb_put(struct net_bridge_fdb_entry *ent)
{
if (atomic_dec_and_test(&ent->use_count))
kmem_cache_free(br_fdb_cache, ent);
call_rcu(&ent->rcu, fdb_rcu_free);
}
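Putting the two interfaces together, a hedged caller sketch (use_entry() is a placeholder): the fast path stays inside an RCU read section and takes no reference, while the refcounted path may hold the entry across the unlock and releases it through br_fdb_put(), which now defers the actual kmem_cache_free() to a grace period via call_rcu():

/* Fast path: entry valid only inside the read-side critical section. */
rcu_read_lock();
f = __br_fdb_get(br, addr);
if (f)
	use_entry(f);		/* must finish before rcu_read_unlock() */
rcu_read_unlock();

/* Refcounted path (e.g. the ATM hook): may outlive the RCU section. */
f = br_fdb_get(br, addr);
if (f) {
	use_entry(f);
	br_fdb_put(f);		/* frees via call_rcu() on the last put */
}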
/*
......@@ -229,9 +245,9 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
read_lock_bh(&br->hash_lock);
rcu_read_lock();
for (i = 0; i < BR_HASH_SIZE; i++) {
hlist_for_each_entry(f, h, &br->hash[i], hlist) {
hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
if (num >= maxnum)
goto out;
......@@ -255,7 +271,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
}
out:
read_unlock_bh(&br->hash_lock);
rcu_read_unlock();
return num;
}
......@@ -309,7 +325,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
memcpy(fdb->addr.addr, addr, ETH_ALEN);
atomic_set(&fdb->use_count, 1);
hlist_add_head(&fdb->hlist, &br->hash[hash]);
hlist_add_head_rcu(&fdb->hlist, &br->hash[hash]);
if (!timer_pending(&br->gc_timer)) {
br->gc_timer.expires = jiffies + hold_time(br);
......@@ -332,8 +348,8 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
{
int ret;
write_lock_bh(&br->hash_lock);
spin_lock_bh(&br->hash_lock);
ret = fdb_insert(br, source, addr, is_local);
write_unlock_bh(&br->hash_lock);
spin_unlock_bh(&br->hash_lock);
return ret;
}
......@@ -149,7 +149,7 @@ static struct net_device *new_bridge_dev(const char *name)
br->lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&br->port_list);
br->hash_lock = RW_LOCK_UNLOCKED;
br->hash_lock = SPIN_LOCK_UNLOCKED;
br->bridge_id.prio[0] = 0x80;
br->bridge_id.prio[1] = 0x00;
......@@ -295,6 +295,7 @@ int br_del_bridge(const char *name)
return ret;
}
/* Mtu of the bridge pseudo-device 1500 or the minimum of the ports */
int br_min_mtu(const struct net_bridge *br)
{
const struct net_bridge_port *p;
......@@ -343,11 +344,12 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
spin_lock_bh(&br->lock);
br_stp_recalculate_bridge_id(br);
if ((br->dev->flags & IFF_UP) && (dev->flags & IFF_UP))
if ((br->dev->flags & IFF_UP)
&& (dev->flags & IFF_UP) && netif_carrier_ok(dev))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
br->dev->mtu = br_min_mtu(br);
dev_set_mtu(br->dev, br_min_mtu(br));
}
return err;
......
......@@ -83,19 +83,17 @@ int br_handle_frame_finish(struct sk_buff *skb)
goto out;
}
dst = br_fdb_get(br, dest);
dst = __br_fdb_get(br, dest);
if (dst != NULL && dst->is_local) {
if (!passedup)
br_pass_frame_up(br, skb);
else
kfree_skb(skb);
br_fdb_put(dst);
goto out;
}
if (dst != NULL) {
br_forward(dst->dst, skb);
br_fdb_put(dst);
goto out;
}
......
......@@ -19,58 +19,59 @@
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr);
struct notifier_block br_device_notifier =
{
struct notifier_block br_device_notifier = {
.notifier_call = br_device_event
};
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
struct net_device *dev;
struct net_bridge_port *p;
struct net_device *dev = ptr;
struct net_bridge_port *p = dev->br_port;
struct net_bridge *br;
dev = ptr;
p = dev->br_port;
if (p == NULL)
return NOTIFY_DONE;
br = p->br;
if ( !(br->dev->flags & IFF_UP))
return NOTIFY_DONE;
if (event == NETDEV_CHANGEMTU) {
dev_set_mtu(br->dev, br_min_mtu(br));
return NOTIFY_DONE;
}
spin_lock_bh(&br->lock);
switch (event) {
case NETDEV_CHANGEADDR:
spin_lock_bh(&br->lock);
br_fdb_changeaddr(p, dev->dev_addr);
if (br->dev->flags & IFF_UP)
br_stp_recalculate_bridge_id(br);
spin_unlock_bh(&br->lock);
br_stp_recalculate_bridge_id(br);
break;
case NETDEV_CHANGEMTU:
br->dev->mtu = br_min_mtu(br);
case NETDEV_CHANGE: /* device is up but carrier changed */
if (netif_carrier_ok(dev)) {
if (p->state == BR_STATE_DISABLED)
br_stp_enable_port(p);
} else {
if (p->state != BR_STATE_DISABLED)
br_stp_disable_port(p);
}
break;
case NETDEV_DOWN:
if (br->dev->flags & IFF_UP) {
spin_lock_bh(&br->lock);
br_stp_disable_port(p);
spin_unlock_bh(&br->lock);
}
br_stp_disable_port(p);
break;
case NETDEV_UP:
if (br->dev->flags & IFF_UP) {
spin_lock_bh(&br->lock);
if (netif_carrier_ok(dev))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
}
break;
case NETDEV_UNREGISTER:
br_del_if(br, dev);
break;
}
}
spin_unlock_bh(&br->lock);
return NOTIFY_DONE;
}
......@@ -46,7 +46,10 @@ struct net_bridge_fdb_entry
{
struct hlist_node hlist;
struct net_bridge_port *dst;
struct list_head age_list;
union {
struct list_head age_list;
struct rcu_head rcu;
};
atomic_t use_count;
unsigned long ageing_timer;
mac_addr addr;
......@@ -86,7 +89,7 @@ struct net_bridge
struct list_head port_list;
struct net_device *dev;
struct net_device_stats statistics;
rwlock_t hash_lock;
spinlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
struct list_head age_list;
......@@ -136,8 +139,10 @@ extern void br_fdb_changeaddr(struct net_bridge_port *p,
extern void br_fdb_cleanup(unsigned long arg);
extern void br_fdb_delete_by_port(struct net_bridge *br,
struct net_bridge_port *p);
extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
const unsigned char *addr);
extern struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
unsigned char *addr);
unsigned char *addr);
extern void br_fdb_put(struct net_bridge_fdb_entry *ent);
extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
unsigned long count, unsigned long off);
......
......@@ -52,7 +52,7 @@ void br_stp_enable_bridge(struct net_bridge *br)
br_config_bpdu_generation(br);
list_for_each_entry(p, &br->port_list, list) {
if (p->dev->flags & IFF_UP)
if ((p->dev->flags & IFF_UP) && netif_carrier_ok(p->dev))
br_stp_enable_port(p);
}
......
......@@ -3431,6 +3431,8 @@ EXPORT_SYMBOL(dev_queue_xmit_nit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
......@@ -3448,10 +3450,7 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
#endif
/* for 801q VLAN support */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
EXPORT_SYMBOL(dev_change_flags);
#endif
#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif
......
......@@ -4,8 +4,6 @@
config IPV6_PRIVACY
bool "IPv6: Privacy Extensions (RFC 3041) support"
depends on IPV6
select CRYPTO
select CRYPTO_MD5
---help---
Privacy Extensions for Stateless Address Autoconfiguration in IPv6
support. With this option, additional periodically-alter
......
......@@ -11,7 +11,7 @@ ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o sit.o \
ip6_flowlabel.o ipv6_syms.o
ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
xfrm6_tunnel.o
xfrm6_tunnel.o xfrm6_output.o
ipv6-objs += $(ipv6-y)
obj-$(CONFIG_INET6_AH) += ah6.o
......
......@@ -31,6 +31,7 @@
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/string.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/xfrm.h>
......@@ -74,6 +75,45 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
return 0;
}
/**
* ipv6_rearrange_rthdr - rearrange IPv6 routing header
* @iph: IPv6 header
* @rthdr: routing header
*
* Rearrange the destination address in @iph and the addresses in @rthdr
* so that they appear in the order they will at the final destination.
* See Appendix A2 of RFC 2402 for details.
*/
static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
{
int segments, segments_left;
struct in6_addr *addrs;
struct in6_addr final_addr;
segments_left = rthdr->segments_left;
if (segments_left == 0)
return;
rthdr->segments_left = 0;
/* The value of rthdr->hdrlen has been verified either by the system
* call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
* packets. So we can assume that it is even and that segments is
* greater than or equal to segments_left.
*
* For the same reason we can assume that this option is of type 0.
*/
segments = rthdr->hdrlen >> 1;
addrs = ((struct rt0_hdr *)rthdr)->addr;
ipv6_addr_copy(&final_addr, addrs + segments - 1);
addrs += segments - segments_left;
memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
ipv6_addr_copy(addrs, &iph->daddr);
ipv6_addr_copy(&iph->daddr, &final_addr);
}
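A short worked example (addresses made up) to make the shuffle above concrete:

/* With segments = 3, segments_left = 2, addrs = [A1, A2, A3] and
 * iph->daddr = D, the code saves A3 as final_addr, advances addrs by
 * one slot, shifts [A2] up by one, and writes D into the gap, leaving
 *	addrs = [A1, D, A2], iph->daddr = A3
 * i.e. the addresses exactly as they will read at the final destination,
 * which is what the AH ICV must be computed over. */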
static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len)
{
union {
......@@ -101,7 +141,7 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len)
break;
case NEXTHDR_ROUTING:
exthdr.rth->segments_left = 0;
ipv6_rearrange_rthdr(iph, exthdr.rth);
break;
default :
......@@ -118,70 +158,55 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len)
int ah6_output(struct sk_buff **pskb)
{
int err;
int hdr_len = sizeof(struct ipv6hdr);
int extlen;
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
struct ipv6hdr *iph = NULL;
struct ipv6hdr *top_iph;
struct ip_auth_hdr *ah;
struct ah_data *ahp;
u8 nexthdr;
char tmp_base[8];
struct {
struct in6_addr daddr;
char hdrs[0];
} *tmp_ext;
if ((*pskb)->ip_summed == CHECKSUM_HW) {
err = skb_checksum_help(pskb, 0);
if (err)
goto error_nolock;
}
top_iph = (struct ipv6hdr *)(*pskb)->data;
top_iph->payload_len = htons((*pskb)->len - sizeof(*top_iph));
spin_lock_bh(&x->lock);
err = xfrm_state_check(x, *pskb);
if (err)
goto error;
nexthdr = *(*pskb)->nh.raw;
*(*pskb)->nh.raw = IPPROTO_AH;
if (x->props.mode) {
err = xfrm6_tunnel_check_size(*pskb);
if (err)
goto error;
iph = (*pskb)->nh.ipv6h;
(*pskb)->nh.ipv6h = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
(*pskb)->nh.ipv6h->version = 6;
(*pskb)->nh.ipv6h->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
(*pskb)->nh.ipv6h->nexthdr = IPPROTO_AH;
ipv6_addr_copy(&(*pskb)->nh.ipv6h->saddr,
(struct in6_addr *) &x->props.saddr);
ipv6_addr_copy(&(*pskb)->nh.ipv6h->daddr,
(struct in6_addr *) &x->id.daddr);
ah = (struct ip_auth_hdr*)((*pskb)->nh.ipv6h+1);
ah->nexthdr = IPPROTO_IPV6;
} else {
u8 *prevhdr;
hdr_len = ip6_find_1stfragopt(*pskb, &prevhdr);
nexthdr = *prevhdr;
*prevhdr = IPPROTO_AH;
iph = kmalloc(hdr_len, GFP_ATOMIC);
if (!iph) {
/* When there are no extension headers, we only need to save the first
* 8 bytes of the base IP header.
*/
memcpy(tmp_base, top_iph, sizeof(tmp_base));
tmp_ext = NULL;
extlen = (*pskb)->h.raw - (unsigned char *)(top_iph + 1);
if (extlen) {
extlen += sizeof(*tmp_ext);
tmp_ext = kmalloc(extlen, GFP_ATOMIC);
if (!tmp_ext) {
err = -ENOMEM;
goto error;
}
memcpy(iph, (*pskb)->data, hdr_len);
(*pskb)->nh.ipv6h = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
memcpy((*pskb)->nh.ipv6h, iph, hdr_len);
err = ipv6_clear_mutable_options((*pskb)->nh.ipv6h, hdr_len);
memcpy(tmp_ext, &top_iph->daddr, extlen);
err = ipv6_clear_mutable_options(top_iph,
extlen - sizeof(*tmp_ext) +
sizeof(*top_iph));
if (err)
goto error_free_iph;
ah = (struct ip_auth_hdr*)((*pskb)->nh.raw+hdr_len);
(*pskb)->h.raw = (unsigned char*) ah;
ah->nexthdr = nexthdr;
}
(*pskb)->nh.ipv6h->priority = 0;
(*pskb)->nh.ipv6h->flow_lbl[0] = 0;
(*pskb)->nh.ipv6h->flow_lbl[1] = 0;
(*pskb)->nh.ipv6h->flow_lbl[2] = 0;
(*pskb)->nh.ipv6h->hop_limit = 0;
ah = (struct ip_auth_hdr *)(*pskb)->h.raw;
ah->nexthdr = nexthdr;
top_iph->priority = 0;
top_iph->flow_lbl[0] = 0;
top_iph->flow_lbl[1] = 0;
top_iph->flow_lbl[2] = 0;
top_iph->hop_limit = 0;
ahp = x->data;
ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) +
......@@ -192,35 +217,16 @@ int ah6_output(struct sk_buff **pskb)
ah->seq_no = htonl(++x->replay.oseq);
ahp->icv(ahp, *pskb, ah->auth_data);
if (x->props.mode) {
(*pskb)->nh.ipv6h->hop_limit = iph->hop_limit;
(*pskb)->nh.ipv6h->priority = iph->priority;
(*pskb)->nh.ipv6h->flow_lbl[0] = iph->flow_lbl[0];
(*pskb)->nh.ipv6h->flow_lbl[1] = iph->flow_lbl[1];
(*pskb)->nh.ipv6h->flow_lbl[2] = iph->flow_lbl[2];
if (x->props.flags & XFRM_STATE_NOECN)
IP6_ECN_clear((*pskb)->nh.ipv6h);
} else {
memcpy((*pskb)->nh.ipv6h, iph, hdr_len);
kfree (iph);
}
(*pskb)->nh.raw = (*pskb)->data;
err = 0;
x->curlft.bytes += (*pskb)->len;
x->curlft.packets++;
spin_unlock_bh(&x->lock);
if (((*pskb)->dst = dst_pop(dst)) == NULL) {
err = -EHOSTUNREACH;
goto error_nolock;
}
return NET_XMIT_BYPASS;
memcpy(top_iph, tmp_base, sizeof(tmp_base));
if (tmp_ext) {
memcpy(&top_iph->daddr, tmp_ext, extlen);
error_free_iph:
kfree(iph);
kfree(tmp_ext);
}
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(*pskb);
return err;
}
......
......@@ -41,10 +41,10 @@
int esp6_output(struct sk_buff **pskb)
{
int err;
int hdr_len = 0;
int hdr_len;
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
struct ipv6hdr *iph = NULL, *top_iph;
struct ipv6hdr *top_iph;
struct ipv6_esp_hdr *esph;
struct crypto_tfm *tfm;
struct esp_data *esp;
......@@ -53,37 +53,13 @@ int esp6_output(struct sk_buff **pskb)
int clen;
int alen;
int nfrags;
u8 *prevhdr;
u8 nexthdr = 0;
if ((*pskb)->ip_summed == CHECKSUM_HW) {
err = skb_checksum_help(pskb, 0);
if (err)
goto error_nolock;
}
esp = x->data;
hdr_len = (*pskb)->h.raw - (*pskb)->data +
sizeof(*esph) + esp->conf.ivlen;
spin_lock_bh(&x->lock);
err = xfrm_state_check(x, *pskb);
if (err)
goto error;
if (x->props.mode) {
err = xfrm6_tunnel_check_size(*pskb);
if (err)
goto error;
} else {
/* Strip IP header in transport mode. Save it. */
hdr_len = ip6_find_1stfragopt(*pskb, &prevhdr);
nexthdr = *prevhdr;
*prevhdr = IPPROTO_ESP;
iph = kmalloc(hdr_len, GFP_ATOMIC);
if (!iph) {
err = -ENOMEM;
goto error;
}
memcpy(iph, (*pskb)->nh.raw, hdr_len);
__skb_pull(*pskb, hdr_len);
}
/* Strip IP+ESP header. */
__skb_pull(*pskb, hdr_len);
/* Now skb is pure payload to encrypt */
err = -ENOMEM;
......@@ -91,7 +67,6 @@ int esp6_output(struct sk_buff **pskb)
/* Round to block size */
clen = (*pskb)->len;
esp = x->data;
alen = esp->auth.icv_trunc_len;
tfm = esp->conf.tfm;
blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
......@@ -100,7 +75,6 @@ int esp6_output(struct sk_buff **pskb)
clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);
if ((nfrags = skb_cow_data(*pskb, clen-(*pskb)->len+alen, &trailer)) < 0) {
if (!x->props.mode && iph) kfree(iph);
goto error;
}
......@@ -113,34 +87,11 @@ int esp6_output(struct sk_buff **pskb)
*(u8*)(trailer->tail + clen-(*pskb)->len - 2) = (clen - (*pskb)->len)-2;
pskb_put(*pskb, trailer, clen - (*pskb)->len);
if (x->props.mode) {
iph = (*pskb)->nh.ipv6h;
top_iph = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
esph = (struct ipv6_esp_hdr*)(top_iph+1);
*(u8*)(trailer->tail - 1) = IPPROTO_IPV6;
top_iph->version = 6;
top_iph->priority = iph->priority;
top_iph->flow_lbl[0] = iph->flow_lbl[0];
top_iph->flow_lbl[1] = iph->flow_lbl[1];
top_iph->flow_lbl[2] = iph->flow_lbl[2];
if (x->props.flags & XFRM_STATE_NOECN)
IP6_ECN_clear(top_iph);
top_iph->nexthdr = IPPROTO_ESP;
top_iph->payload_len = htons((*pskb)->len + alen - sizeof(struct ipv6hdr));
top_iph->hop_limit = iph->hop_limit;
ipv6_addr_copy(&top_iph->saddr,
(struct in6_addr *)&x->props.saddr);
ipv6_addr_copy(&top_iph->daddr,
(struct in6_addr *)&x->id.daddr);
} else {
esph = (struct ipv6_esp_hdr*)skb_push(*pskb, x->props.header_len);
(*pskb)->h.raw = (unsigned char*)esph;
top_iph = (struct ipv6hdr*)skb_push(*pskb, hdr_len);
memcpy(top_iph, iph, hdr_len);
kfree(iph);
top_iph->payload_len = htons((*pskb)->len + alen - sizeof(struct ipv6hdr));
*(u8*)(trailer->tail - 1) = nexthdr;
}
top_iph = (struct ipv6hdr *)__skb_push(*pskb, hdr_len);
esph = (struct ipv6_esp_hdr *)(*pskb)->h.raw;
top_iph->payload_len = htons((*pskb)->len + alen - sizeof(*top_iph));
*(u8*)(trailer->tail - 1) = *(*pskb)->nh.raw;
*(*pskb)->nh.raw = IPPROTO_ESP;
esph->spi = x->id.spi;
esph->seq_no = htonl(++x->replay.oseq);
......@@ -173,21 +124,9 @@ int esp6_output(struct sk_buff **pskb)
pskb_put(*pskb, trailer, alen);
}
(*pskb)->nh.raw = (*pskb)->data;
x->curlft.bytes += (*pskb)->len;
x->curlft.packets++;
spin_unlock_bh(&x->lock);
if (((*pskb)->dst = dst_pop(dst)) == NULL) {
err = -EHOSTUNREACH;
goto error_nolock;
}
return NET_XMIT_BYPASS;
err = 0;
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(*pskb);
return err;
}
......
......@@ -94,7 +94,7 @@ static __u32 rt_sernum;
static struct timer_list ip6_fib_timer = TIMER_INITIALIZER(fib6_run_gc, 0, 0);
static struct fib6_walker_t fib6_walker_list = {
struct fib6_walker_t fib6_walker_list = {
.prev = &fib6_walker_list,
.next = &fib6_walker_list,
};
......
......@@ -120,52 +120,14 @@ static int ipcomp6_output(struct sk_buff **pskb)
int err;
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
struct ipv6hdr *iph, *top_iph;
int hdr_len = 0;
struct ipv6hdr *top_iph;
int hdr_len;
struct ipv6_comp_hdr *ipch;
struct ipcomp_data *ipcd = x->data;
u8 *prevhdr;
u8 nexthdr = 0;
int plen, dlen;
u8 *start, *scratch = ipcd->scratch;
if ((*pskb)->ip_summed == CHECKSUM_HW) {
err = skb_checksum_help(pskb, 0);
if (err)
goto error_nolock;
}
spin_lock_bh(&x->lock);
err = xfrm_state_check(x, *pskb);
if (err)
goto error;
if (x->props.mode) {
err = xfrm6_tunnel_check_size(*pskb);
if (err)
goto error;
hdr_len = sizeof(struct ipv6hdr);
nexthdr = IPPROTO_IPV6;
iph = (*pskb)->nh.ipv6h;
top_iph = (struct ipv6hdr *)skb_push(*pskb, sizeof(struct ipv6hdr));
top_iph->version = 6;
top_iph->priority = iph->priority;
top_iph->flow_lbl[0] = iph->flow_lbl[0];
top_iph->flow_lbl[1] = iph->flow_lbl[1];
top_iph->flow_lbl[2] = iph->flow_lbl[2];
top_iph->nexthdr = IPPROTO_IPV6; /* initial */
top_iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
top_iph->hop_limit = iph->hop_limit;
memcpy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr, sizeof(struct in6_addr));
memcpy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr, sizeof(struct in6_addr));
(*pskb)->nh.raw = (*pskb)->data; /* == top_iph */
(*pskb)->h.raw = (*pskb)->nh.raw + hdr_len;
} else {
hdr_len = ip6_find_1stfragopt(*pskb, &prevhdr);
nexthdr = *prevhdr;
}
hdr_len = (*pskb)->h.raw - (*pskb)->data;
/* check whether datagram len is larger than threshold */
if (((*pskb)->len - hdr_len) < ipcd->threshold) {
......@@ -181,7 +143,7 @@ static int ipcomp6_output(struct sk_buff **pskb)
/* compression */
plen = (*pskb)->len - hdr_len;
dlen = IPCOMP_SCRATCH_SIZE;
start = (*pskb)->data + hdr_len;
start = (*pskb)->h.raw;
err = crypto_comp_compress(ipcd->tfm, start, plen, scratch, &dlen);
if (err) {
......@@ -194,39 +156,21 @@ static int ipcomp6_output(struct sk_buff **pskb)
pskb_trim(*pskb, hdr_len + dlen + sizeof(struct ip_comp_hdr));
/* insert ipcomp header and replace datagram */
top_iph = (*pskb)->nh.ipv6h;
top_iph = (struct ipv6hdr *)(*pskb)->data;
if (x->props.mode && (x->props.flags & XFRM_STATE_NOECN))
IP6_ECN_clear(top_iph);
top_iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
(*pskb)->nh.raw = (*pskb)->data; /* top_iph */
ip6_find_1stfragopt(*pskb, &prevhdr);
*prevhdr = IPPROTO_COMP;
ipch = (struct ipv6_comp_hdr *)((unsigned char *)top_iph + hdr_len);
ipch->nexthdr = nexthdr;
ipch = (struct ipv6_comp_hdr *)start;
ipch->nexthdr = *(*pskb)->nh.raw;
ipch->flags = 0;
ipch->cpi = htons((u16 )ntohl(x->id.spi));
*(*pskb)->nh.raw = IPPROTO_COMP;
(*pskb)->h.raw = (unsigned char*)ipch;
out_ok:
x->curlft.bytes += (*pskb)->len;
x->curlft.packets++;
spin_unlock_bh(&x->lock);
if (((*pskb)->dst = dst_pop(dst)) == NULL) {
err = -EHOSTUNREACH;
goto error_nolock;
}
err = NET_XMIT_BYPASS;
err = 0;
out_exit:
return err;
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(*pskb);
goto out_exit;
return err;
}
static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
......
/*
* xfrm6_output.c - Common IPsec encapsulation code for IPv6.
* Copyright (C) 2002 USAGI/WIDE Project
* Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <net/inet_ecn.h>
#include <net/ipv6.h>
#include <net/xfrm.h>
/* Add encapsulation header.
*
* In transport mode, the IP header and mutable extension headers will be moved
* forward to make space for the encapsulation header.
*
* In tunnel mode, the top IP header will be constructed per RFC 2401.
* The following fields in it shall be filled in by x->type->output:
* payload_len
*
* On exit, skb->h will be set to the start of the encapsulation header to be
* filled in by x->type->output and skb->nh will be set to the nextheader field
* of the extension header directly preceding the encapsulation header, or in
* its absence, that of the top IP header. The value of skb->data will always
* point to the top IP header.
*/
static void xfrm6_encap(struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
struct xfrm_state *x = dst->xfrm;
struct ipv6hdr *iph, *top_iph;
skb_push(skb, x->props.header_len);
iph = skb->nh.ipv6h;
if (!x->props.mode) {
u8 *prevhdr;
int hdr_len;
hdr_len = ip6_find_1stfragopt(skb, &prevhdr);
skb->nh.raw = prevhdr - x->props.header_len;
skb->h.raw = skb->data + hdr_len;
memmove(skb->data, iph, hdr_len);
return;
}
skb->nh.raw = skb->data;
top_iph = skb->nh.ipv6h;
skb->nh.raw = &top_iph->nexthdr;
skb->h.ipv6h = top_iph + 1;
top_iph->version = 6;
top_iph->priority = iph->priority;
if (x->props.flags & XFRM_STATE_NOECN)
IP6_ECN_clear(top_iph);
top_iph->flow_lbl[0] = iph->flow_lbl[0];
top_iph->flow_lbl[1] = iph->flow_lbl[1];
top_iph->flow_lbl[2] = iph->flow_lbl[2];
top_iph->nexthdr = IPPROTO_IPV6;
top_iph->hop_limit = iph->hop_limit;
ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
}
int xfrm6_output(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct dst_entry *dst = skb->dst;
struct xfrm_state *x = dst->xfrm;
int err;
if (skb->ip_summed == CHECKSUM_HW) {
err = skb_checksum_help(pskb, 0);
skb = *pskb;
if (err)
goto error_nolock;
}
spin_lock_bh(&x->lock);
err = xfrm_state_check(x, skb);
if (err)
goto error;
if (x->props.mode) {
err = xfrm6_tunnel_check_size(skb);
if (err)
goto error;
}
xfrm6_encap(skb);
err = x->type->output(pskb);
skb = *pskb;
if (err)
goto error;
x->curlft.bytes += skb->len;
x->curlft.packets++;
spin_unlock_bh(&x->lock);
skb->nh.raw = skb->data;
if (!(skb->dst = dst_pop(dst))) {
err = -EHOSTUNREACH;
goto error_nolock;
}
err = NET_XMIT_BYPASS;
out_exit:
return err;
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(skb);
goto out_exit;
}
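With this common entry point, each transform's x->type->output only builds its own header; locking, checksum help, size checks, accounting and dst popping live in xfrm6_output(). A hedged sketch of how a transform plugs in — the names and the protocol number are hypothetical, and the field set is assumed from the esp6/ah6 registrations of this era:

static struct xfrm_type my6_type = {
	.description	= "MY6",
	.proto		= 255,			/* hypothetical */
	.init_state	= my6_init_state,
	.destructor	= my6_destroy,
	.input		= my6_input,
	.output		= my6_output,		/* invoked by xfrm6_output() */
};

/* registered once at module init, mirroring esp6/ah6: */
/* xfrm_register_type(&my6_type, AF_INET6); */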
......@@ -157,7 +157,7 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
/* Copy neighbour for reachability confirmation */
dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
dst_prev->input = rt->u.dst.input;
dst_prev->output = dst_prev->xfrm->type->output;
dst_prev->output = xfrm6_output;
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
x->u.rt6.rt6i_flags = rt0->rt6i_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
......
......@@ -16,7 +16,7 @@
#include <linux/ipsec.h>
#include <net/ipv6.h>
extern struct xfrm_state_afinfo xfrm6_state_afinfo;
static struct xfrm_state_afinfo xfrm6_state_afinfo;
static void
__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
......
......@@ -365,46 +365,12 @@ EXPORT_SYMBOL(xfrm6_tunnel_check_size);
static int xfrm6_tunnel_output(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct dst_entry *dst = skb->dst;
struct xfrm_state *x = dst->xfrm;
struct ipv6hdr *iph, *top_iph;
int err;
if ((err = xfrm6_tunnel_check_size(skb)) != 0)
goto error_nolock;
iph = skb->nh.ipv6h;
top_iph = (struct ipv6hdr *)skb_push(skb, x->props.header_len);
top_iph->version = 6;
top_iph->priority = iph->priority;
top_iph->flow_lbl[0] = iph->flow_lbl[0];
top_iph->flow_lbl[1] = iph->flow_lbl[1];
top_iph->flow_lbl[2] = iph->flow_lbl[2];
top_iph->nexthdr = IPPROTO_IPV6;
top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
top_iph->hop_limit = iph->hop_limit;
memcpy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr, sizeof(struct in6_addr));
memcpy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr, sizeof(struct in6_addr));
skb->nh.raw = skb->data;
skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr);
x->curlft.bytes += skb->len;
x->curlft.packets++;
spin_unlock_bh(&x->lock);
struct ipv6hdr *top_iph;
if ((skb->dst = dst_pop(dst)) == NULL) {
kfree_skb(skb);
err = -EHOSTUNREACH;
goto error_nolock;
}
return NET_XMIT_BYPASS;
top_iph = (struct ipv6hdr *)skb->data;
top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
error_nolock:
kfree_skb(skb);
return err;
return 0;
}
static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
......
......@@ -10,6 +10,7 @@
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
......@@ -1451,8 +1452,7 @@ static int __init nr_proto_init(void)
module_init(nr_proto_init);
MODULE_PARM(nr_ndevs, "i");
module_param(nr_ndevs, int, 0);
MODULE_PARM_DESC(nr_ndevs, "number of NET/ROM devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
......
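For comparison, the old and new module-parameter styles side by side; the parameter name here is hypothetical. module_param() from <linux/moduleparam.h> is type-checked and takes a sysfs permission mask (0 keeps the parameter out of sysfs):

static int my_ndevs = 4;		/* hypothetical parameter */

/* old, stringly-typed style being removed: */
/* MODULE_PARM(my_ndevs, "i"); */

/* new, type-checked style: */
module_param(my_ndevs, int, 0);
MODULE_PARM_DESC(my_ndevs, "number of devices to create");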
......@@ -11,6 +11,7 @@
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
......@@ -57,7 +58,7 @@ int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;
HLIST_HEAD(rose_list);
static HLIST_HEAD(rose_list);
spinlock_t rose_list_lock = SPIN_LOCK_UNLOCKED;
static struct proto_ops rose_proto_ops;
......@@ -1550,7 +1551,7 @@ static int __init rose_proto_init(void)
}
module_init(rose_proto_init);
MODULE_PARM(rose_ndevs, "i");
module_param(rose_ndevs, int, 0);
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
......
......@@ -64,17 +64,20 @@ struct tc_u_knode
struct tc_u_hnode *ht_up;
#ifdef CONFIG_NET_CLS_ACT
struct tc_action *action;
#ifdef CONFIG_NET_CLS_IND
char indev[IFNAMSIZ];
#endif
#else
#ifdef CONFIG_NET_CLS_POLICE
struct tcf_police *police;
#endif
#endif
#ifdef CONFIG_NET_CLS_IND
char indev[IFNAMSIZ];
#endif
u8 fshift;
struct tcf_result res;
struct tc_u_hnode *ht_down;
#ifdef CONFIG_CLS_U32_PERF
struct tc_u32_pcnt *pf;
#endif
struct tc_u32_sel sel;
};
......@@ -120,6 +123,9 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
int sdepth = 0;
int off2 = 0;
int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
int j;
#endif
int i;
next_ht:
......@@ -130,7 +136,8 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF
n->sel.rcnt +=1;
n->pf->rcnt +=1;
j = 0;
#endif
for (i = n->sel.nkeys; i>0; i--, key++) {
......@@ -139,7 +146,8 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
goto next_knode;
}
#ifdef CONFIG_CLS_U32_PERF
key->kcnt +=1;
n->pf->kcnts[j] +=1;
j++;
#endif
}
if (n->ht_down == NULL) {
......@@ -147,7 +155,6 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
if (n->sel.flags&TC_U32_TERMINAL) {
*res = n->res;
#ifdef CONFIG_NET_CLS_ACT
#ifdef CONFIG_NET_CLS_IND
/* yes, i know it sucks but the feature is
** optional dammit! - JHS */
......@@ -164,8 +171,9 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
}
#endif
#ifdef CONFIG_CLS_U32_PERF
n->sel.rhit +=1;
n->pf->rhit +=1;
#endif
#ifdef CONFIG_NET_CLS_ACT
if (n->action) {
int pol_res = tcf_action_exec(skb, n->action);
if (skb->tc_classid > 0) {
......@@ -358,6 +366,10 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
#endif
if (n->ht_down)
n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
if (n && (NULL != n->pf))
kfree(n->pf);
#endif
kfree(n);
return 0;
}
......@@ -571,18 +583,6 @@ static int u32_set_parms(struct Qdisc *q, unsigned long base,
tcf_action_destroy(act, TCA_ACT_UNBIND);
}
#ifdef CONFIG_NET_CLS_IND
n->indev[0] = 0;
if(tb[TCA_U32_INDEV-1]) {
struct rtattr *input_dev = tb[TCA_U32_INDEV-1];
if (RTA_PAYLOAD(input_dev) >= IFNAMSIZ) {
printk("cls_u32: bad indev name %s\n",(char*)RTA_DATA(input_dev));
/* should we clear state first? */
return -EINVAL;
}
sprintf(n->indev, "%s", (char*)RTA_DATA(input_dev));
}
#endif
#else
#ifdef CONFIG_NET_CLS_POLICE
......@@ -594,6 +594,19 @@ static int u32_set_parms(struct Qdisc *q, unsigned long base,
tcf_police_release(police, TCA_ACT_UNBIND);
}
#endif
#endif
#ifdef CONFIG_NET_CLS_IND
n->indev[0] = 0;
if(tb[TCA_U32_INDEV-1]) {
struct rtattr *input_dev = tb[TCA_U32_INDEV-1];
if (RTA_PAYLOAD(input_dev) >= IFNAMSIZ) {
printk("cls_u32: bad indev name %s\n",(char*)RTA_DATA(input_dev));
/* should we clear state first? */
return -EINVAL;
}
sprintf(n->indev, "%s", (char*)RTA_DATA(input_dev));
printk("got IND %s\n",n->indev);
}
#endif
return 0;
......@@ -682,17 +695,20 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
s = RTA_DATA(tb[TCA_U32_SEL-1]);
#ifdef CONFIG_CLS_U32_PERF
if (RTA_PAYLOAD(tb[TCA_U32_SEL-1]) <
(s->nkeys*sizeof(struct tc_u32_key)) + sizeof(struct tc_u32_sel)) {
printk("Please upgrade your iproute2 tools or compile proper options in!\n");
return -EINVAL;
}
#endif
n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
if (n == NULL)
return -ENOBUFS;
memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key));
#ifdef CONFIG_CLS_U32_PERF
n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(__u64), GFP_KERNEL);
if (n->pf == NULL) {
kfree(n);
return -ENOBUFS;
}
memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(__u64));
#endif
memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
n->ht_up = ht;
n->handle = handle;
......@@ -721,6 +737,10 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
*arg = (unsigned long)n;
return 0;
}
#ifdef CONFIG_CLS_U32_PERF
if (n && (NULL != n->pf))
kfree(n->pf);
#endif
kfree(n);
return err;
}
......@@ -812,13 +832,6 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
p_rta->rta_len = skb->tail - (u8*)p_rta;
}
#ifdef CONFIG_NET_CLS_IND
if(strlen(n->indev)) {
struct rtattr * p_rta = (struct rtattr*)skb->tail;
RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
p_rta->rta_len = skb->tail - (u8*)p_rta;
}
#endif
#else
#ifdef CONFIG_NET_CLS_POLICE
......@@ -833,14 +846,29 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
}
#endif
#endif
#ifdef CONFIG_NET_CLS_IND
if(strlen(n->indev)) {
struct rtattr * p_rta = (struct rtattr*)skb->tail;
RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
p_rta->rta_len = skb->tail - (u8*)p_rta;
}
#endif
#ifdef CONFIG_CLS_U32_PERF
RTA_PUT(skb, TCA_U32_PCNT,
sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(__u64),
n->pf);
#endif
}
rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_ACT
if (TC_U32_KEY(n->handle) && n->action && n->action->type == TCA_OLD_COMPAT) {
if (tcf_action_copy_stats(skb,n->action))
goto rtattr_failure;
if (TC_U32_KEY(n->handle) != 0) {
if (TC_U32_KEY(n->handle) && n->action && n->action->type == TCA_OLD_COMPAT) {
if (tcf_action_copy_stats(skb,n->action))
goto rtattr_failure;
}
}
#else
#ifdef CONFIG_NET_CLS_POLICE
......@@ -875,6 +903,19 @@ static struct tcf_proto_ops cls_u32_ops = {
static int __init init_u32(void)
{
printk("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
printk(" Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_POLICE
printk(" OLD policer on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
printk(" input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
printk(" Actions configured\n");
#endif
return register_tcf_proto_ops(&cls_u32_ops);
}
......
......@@ -8,6 +8,10 @@ menu "SCTP Configuration (EXPERIMENTAL)"
config IP_SCTP
tristate "The SCTP Protocol (EXPERIMENTAL)"
depends on IPV6 || IPV6=n
select CRYPTO if SCTP_HMAC_SHA1 || SCTP_HMAC_MD5
select CRYPTO_HMAC if SCTP_HMAC_SHA1 || SCTP_HMAC_MD5
select CRYPTO_SHA1 if SCTP_HMAC_SHA1
select CRYPTO_MD5 if SCTP_HMAC_MD5
---help---
Stream Control Transmission Protocol
......@@ -71,18 +75,12 @@ config SCTP_HMAC_NONE
config SCTP_HMAC_SHA1
bool "HMAC-SHA1"
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_SHA1
help
Enable the use of HMAC-SHA1 during association establishment. It
is advised to use either HMAC-MD5 or HMAC-SHA1.
config SCTP_HMAC_MD5
bool "HMAC-MD5"
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_MD5
help
Enable the use of HMAC-MD5 during association establishment. It is
advised to use either HMAC-MD5 or HMAC-SHA1.
......
......@@ -1296,21 +1296,6 @@ xprt_transmit(struct rpc_task *task)
/*
* Reserve an RPC call slot.
*/
void
xprt_reserve(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
task->tk_status = -EIO;
if (!xprt->shutdown) {
spin_lock(&xprt->xprt_lock);
do_xprt_reserve(task);
spin_unlock(&xprt->xprt_lock);
if (task->tk_rqstp)
del_timer_sync(&xprt->timer);
}
}
static inline void
do_xprt_reserve(struct rpc_task *task)
{
......@@ -1332,6 +1317,21 @@ do_xprt_reserve(struct rpc_task *task)
rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}
void
xprt_reserve(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
task->tk_status = -EIO;
if (!xprt->shutdown) {
spin_lock(&xprt->xprt_lock);
do_xprt_reserve(task);
spin_unlock(&xprt->xprt_lock);
if (task->tk_rqstp)
del_timer_sync(&xprt->timer);
}
}
/*
* Allocate a 'unique' XID
*/
......