Commit 1eb0a9ec authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents bab7bf66 584d3928
......@@ -55,6 +55,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <asm/byteorder.h>
......
......@@ -15,6 +15,7 @@
*/
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include "internal.h"
......
......@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
......
......@@ -13,6 +13,7 @@
*/
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include "internal.h"
......
......@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
......
......@@ -40,6 +40,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/crypto.h>
......
......@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/atm_idt77105.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/param.h>
#include <asm/uaccess.h>
......@@ -38,6 +39,7 @@ struct idt77105_priv {
unsigned char old_mcr; /* storage of MCR reg while signal lost */
};
static spinlock_t idt77105_priv_lock = SPIN_LOCK_UNLOCKED;
#define PRIV(dev) ((struct idt77105_priv *) dev->phy_data)
......@@ -144,12 +146,11 @@ static int fetch_stats(struct atm_dev *dev,struct idt77105_stats *arg,int zero)
unsigned long flags;
struct idt77105_stats stats;
save_flags(flags);
cli();
spin_lock_irqsave(&idt77105_priv_lock, flags);
memcpy(&stats, &PRIV(dev)->stats, sizeof(struct idt77105_stats));
if (zero)
memset(&PRIV(dev)->stats, 0, sizeof(struct idt77105_stats));
restore_flags(flags);
spin_unlock_irqrestore(&idt77105_priv_lock, flags);
if (arg == NULL)
return 0;
return copy_to_user(arg, &PRIV(dev)->stats,
......@@ -267,11 +268,10 @@ static int idt77105_start(struct atm_dev *dev)
if (!(PRIV(dev) = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL)))
return -ENOMEM;
PRIV(dev)->dev = dev;
save_flags(flags);
cli();
spin_lock_irqsave(&idt77105_priv_lock, flags);
PRIV(dev)->next = idt77105_all;
idt77105_all = PRIV(dev);
restore_flags(flags);
spin_unlock_irqrestore(&idt77105_priv_lock, flags);
memset(&PRIV(dev)->stats,0,sizeof(struct idt77105_stats));
/* initialise dev->signal from Good Signal Bit */
......@@ -305,11 +305,9 @@ static int idt77105_start(struct atm_dev *dev)
idt77105_stats_timer_func(0); /* clear 77105 counters */
(void) fetch_stats(dev,NULL,1); /* clear kernel counters */
cli();
if (!start_timer) restore_flags(flags);
else {
spin_lock_irqsave(&idt77105_priv_lock, flags);
if (start_timer) {
start_timer = 0;
restore_flags(flags);
init_timer(&stats_timer);
stats_timer.expires = jiffies+IDT77105_STATS_TIMER_PERIOD;
......@@ -321,32 +319,11 @@ static int idt77105_start(struct atm_dev *dev)
restart_timer.function = idt77105_restart_timer_func;
add_timer(&restart_timer);
}
spin_unlock_irqrestore(&idt77105_priv_lock, flags);
return 0;
}
static const struct atmphy_ops idt77105_ops = {
idt77105_start,
idt77105_ioctl,
idt77105_int
};
int __init idt77105_init(struct atm_dev *dev)
{
MOD_INC_USE_COUNT;
dev->phy = &idt77105_ops;
return 0;
}
/*
* TODO: this function should be called through phy_ops
* but that will not be possible for some time as there is
* currently a freeze on modifying that structure
* -- Greg Banks, 13 Sep 1999
*/
int idt77105_stop(struct atm_dev *dev)
{
struct idt77105_priv *walk, *prev;
......@@ -372,30 +349,33 @@ int idt77105_stop(struct atm_dev *dev)
}
}
MOD_DEC_USE_COUNT;
return 0;
}
static const struct atmphy_ops idt77105_ops = {
.start = idt77105_start,
.ioctl = idt77105_ioctl,
.interrupt = idt77105_int,
.stop = idt77105_stop,
};
EXPORT_SYMBOL(idt77105_init);
EXPORT_SYMBOL(idt77105_stop);
MODULE_LICENSE("GPL");
#ifdef MODULE
int init_module(void)
int idt77105_init(struct atm_dev *dev)
{
dev->phy = &idt77105_ops;
return 0;
}
EXPORT_SYMBOL(idt77105_init);
void cleanup_module(void)
static void __exit idt77105_exit(void)
{
/* turn off timers */
del_timer(&stats_timer);
del_timer(&restart_timer);
}
#endif
module_exit(idt77105_exit);
MODULE_LICENSE("GPL");
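The idt77105 hunks above replace the old save_flags()/cli()/restore_flags() critical sections with a driver-private spinlock and convert the module to module_exit(). A minimal hedged sketch of the locking conversion, using a hypothetical lock and counter rather than the driver's own data:
#include <linux/spinlock.h>
static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;   /* hypothetical lock */
static int demo_counter;                            /* hypothetical shared data */
static int demo_read_and_clear(void)
{
	unsigned long flags;
	int val;
	/* was: save_flags(flags); cli(); ... restore_flags(flags); */
	spin_lock_irqsave(&demo_lock, flags);
	val = demo_counter;
	demo_counter = 0;
	spin_unlock_irqrestore(&demo_lock, flags);
	return val;
}
The named lock protects only the data it is declared for, instead of disabling interrupts machine-wide as cli() did.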
......@@ -354,11 +354,8 @@ static void __exit nicstar_module_exit(void)
card = cards[i];
#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
if (card->max_pcr == ATM_25_PCR) {
idt77105_stop(card->atmdev);
}
#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
if (card->atmdev->phy && card->atmdev->phy->stop)
card->atmdev->phy->stop(card->atmdev);
/* Stop everything */
writel(0x00000000, card->membase + CFG);
......@@ -490,11 +487,6 @@ static int __init ns_init_card(int i, struct pci_dev *pcidev)
card->atmdev = NULL;
card->pcidev = pcidev;
card->membase = pci_resource_start(pcidev, 1);
#ifdef __powerpc__
/* Compensate for different memory map between host CPU and PCI bus.
Shouldn't we use a macro for this? */
card->membase += KERNELBASE;
#endif /* __powerpc__ */
card->membase = (unsigned long) ioremap(card->membase, NS_IOREMAP_SIZE);
if (card->membase == 0)
{
......@@ -905,22 +897,13 @@ static int __init ns_init_card(int i, struct pci_dev *pcidev)
card->atmdev->phy = NULL;
#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
if (card->max_pcr == ATM_OC3_PCR) {
if (card->max_pcr == ATM_OC3_PCR)
suni_init(card->atmdev);
MOD_INC_USE_COUNT;
/* Can't remove the nicstar driver or the suni driver would oops */
}
#endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
if (card->max_pcr == ATM_25_PCR) {
if (card->max_pcr == ATM_25_PCR)
idt77105_init(card->atmdev);
/* Note that for the IDT77105 PHY we don't need the awful
* module count hack that the SUNI needs because we can
* stop the '105 when the nicstar module is cleaned up.
*/
}
#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
if (card->atmdev->phy && card->atmdev->phy->start)
......@@ -2327,6 +2310,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
{
push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
0, 0);
atomic_inc(&vcc->stats->rx_drop);
}
else
{
......@@ -2354,6 +2338,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
{
push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
0, 0);
atomic_inc(&vcc->stats->rx_drop);
}
else
{
......@@ -2378,6 +2363,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
{
push_rxbufs(card, BUF_LG, (u32) skb,
(u32) virt_to_bus(skb->data), 0, 0);
atomic_inc(&vcc->stats->rx_drop);
}
else
{
......@@ -2462,6 +2448,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
}
else
dev_kfree_skb_any(hb);
atomic_inc(&vcc->stats->rx_drop);
}
else
{
......
......@@ -297,7 +297,7 @@ int suni_init(struct atm_dev *dev)
mri = GET(MRI); /* reset SUNI */
PUT(mri | SUNI_MRI_RESET,MRI);
PUT(mri,MRI);
PUT(0,MT); /* disable all tests */
PUT((GET(MT) & SUNI_MT_DS27_53),MT); /* disable all tests */
REG_CHANGE(SUNI_TPOP_APM_S,SUNI_TPOP_APM_S_SHIFT,SUNI_TPOP_S_SONET,
TPOP_APM); /* use SONET */
REG_CHANGE(SUNI_TACP_IUCHP_CLP,0,SUNI_TACP_IUCHP_CLP,
......@@ -307,24 +307,6 @@ int suni_init(struct atm_dev *dev)
return 0;
}
EXPORT_SYMBOL(suni_init);
MODULE_LICENSE("GPL");
#ifdef MODULE
int init_module(void)
{
return 0;
}
void cleanup_module(void)
{
/* Nay */
}
#endif
......@@ -198,6 +198,7 @@
#define SUNI_MT_IOTST 0x04 /* RW, enable test mode */
#define SUNI_MT_DBCTRL 0x08 /* W, control data bus by CSB pin */
#define SUNI_MT_PMCTST 0x10 /* W, PMC test mode */
#define SUNI_MT_DS27_53 0x80 /* RW, select between 8- or 16- bit */
#define SUNI_IDLE_PATTERN 0x6a /* idle pattern */
......
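The suni.c change above stops writing 0 to the whole Master Test register: it still disables all test modes, but now preserves the DS27_53 bus-width select bit defined in the suni.h hunk. Spelled out as a read-modify-write sketch with the driver's GET/PUT accessors:
unsigned char mt;
mt = GET(MT);              /* read current Master Test register */
mt &= SUNI_MT_DS27_53;     /* keep only the 8/16-bit bus select bit */
PUT(mt, MT);               /* write back: test bits cleared, width kept */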
......@@ -180,5 +180,8 @@ struct in6_flowlabel_req
#define IPV6_FLOWLABEL_MGR 32
#define IPV6_FLOWINFO_SEND 33
#define IPV6_IPSEC_POLICY 34
#define IPV6_XFRM_POLICY 35
#endif
......@@ -260,6 +260,7 @@ struct ebt_table
unsigned int valid_hooks);
/* the data used by the kernel */
struct ebt_table_info *private;
struct module *me;
};
extern int ebt_register_table(struct ebt_table *table);
......
......@@ -262,6 +262,14 @@ struct sadb_x_ipsecrequest {
#define SADB_X_EALG_AESCBC 12
#define SADB_EALG_MAX 12
/* Compression algorithms */
#define SADB_X_CALG_NONE 0
#define SADB_X_CALG_OUI 1
#define SADB_X_CALG_DEFLATE 2
#define SADB_X_CALG_LZS 3
#define SADB_X_CALG_LZJH 4
#define SADB_X_CALG_MAX 4
/* Extension Header values */
#define SADB_EXT_RESERVED 0
#define SADB_EXT_SA 1
......
......@@ -12,12 +12,7 @@
*/
typedef union
{
struct {
__u32 addr;
__u32 mask; /* Use unused bits to cache mask. */
} a4;
#define xfrm4_addr a4.addr
#define xfrm4_mask a4.mask
__u32 a4;
__u32 a6[4];
} xfrm_address_t;
......
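xfrm_address_t loses the per-family sub-struct and the xfrm4_addr/xfrm4_mask macros; callers now touch .a4 or .a6 directly, as the ah.c and esp.c hunks below do. A hedged usage sketch with placeholder values:
xfrm_address_t addr;
addr.a4 = htonl(0xc0000201);               /* IPv4: 192.0.2.1 as a single __u32 */
memcpy(addr.a6, &in6addr_loopback,         /* IPv6: four __u32 words */
       sizeof(addr.a6));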
......@@ -49,6 +49,7 @@ static struct ebt_table broute_table =
.valid_hooks = 1 << NF_BR_BROUTING,
.lock = RW_LOCK_UNLOCKED,
.check = check,
.me = THIS_MODULE,
};
static int ebt_broute(struct sk_buff **pskb)
......
......@@ -57,6 +57,7 @@ static struct ebt_table frame_filter =
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.check = check,
.me = THIS_MODULE,
};
static unsigned int
......
......@@ -56,6 +56,7 @@ static struct ebt_table frame_nat =
.valid_hooks = NAT_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.check = check,
.me = THIS_MODULE,
};
static unsigned int
......
......@@ -969,8 +969,10 @@ static int do_replace(void *user, unsigned int len)
goto free_counterstmp;
t = find_table_lock(tmp.name, &ret, &ebt_mutex);
if (!t)
if (!t) {
ret = -ENOENT;
goto free_iterate;
}
/* the table doesn't like it */
if (t->check && (ret = t->check(newinfo, tmp.valid_hooks)))
......@@ -984,6 +986,12 @@ static int do_replace(void *user, unsigned int len)
/* we have the mutex lock, so no danger in reading this pointer */
table = t->private;
/* make sure the table can only be rmmod'ed if it contains no rules */
if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
ret = -ENOENT;
goto free_unlock;
} else if (table->nentries && !newinfo->nentries)
module_put(t->me);
/* we need an atomic snapshot of the counters */
write_lock_bh(&t->lock);
if (tmp.num_counters)
......@@ -1168,6 +1176,11 @@ int ebt_register_table(struct ebt_table *table)
goto free_unlock;
}
/* Hold a reference count if the chains aren't empty */
if (newinfo->nentries && !try_module_get(table->me)) {
ret = -ENOENT;
goto free_unlock;
}
list_prepend(&ebt_tables, table);
up(&ebt_mutex);
return 0;
......@@ -1196,8 +1209,6 @@ void ebt_unregister_table(struct ebt_table *table)
down(&ebt_mutex);
LIST_DELETE(&ebt_tables, table);
up(&ebt_mutex);
EBT_ENTRY_ITERATE(table->private->entries,
table->private->entries_size, ebt_cleanup_entry, NULL);
if (table->private->entries)
vfree(table->private->entries);
if (table->private->chainstack) {
......
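Together with the new struct module *me field, the ebtables hunks above pin the table's owning module while the table holds rules and drop the reference when the last rule is removed, so a table module can only be rmmod'ed when it is empty. The rule in isolation, as a hedged sketch with a hypothetical helper:
#include <linux/module.h>
/* Pin/unpin t->me as the rule count goes 0 -> nonzero -> 0. */
static int demo_adjust_owner(struct ebt_table *t,
			     unsigned int old_rules, unsigned int new_rules)
{
	if (!old_rules && new_rules && !try_module_get(t->me))
		return -ENOENT;     /* owner is unloading, reject the update */
	if (old_rules && !new_rules)
		module_put(t->me);  /* table is empty again, allow unload */
	return 0;
}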
......@@ -22,4 +22,4 @@ obj-$(CONFIG_IP_PNP) += ipconfig.o
obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_XFRM_USER) += xfrm_user.o
obj-y += xfrm_policy.o xfrm_state.o xfrm_input.o xfrm_algo.o
obj-y += xfrm_policy.o xfrm4_policy.o xfrm_state.o xfrm4_state.o xfrm_input.o xfrm4_input.o xfrm_algo.o
......@@ -92,8 +92,8 @@ static int ah_output(struct sk_buff *skb)
top_iph->ttl = 0;
top_iph->protocol = IPPROTO_AH;
top_iph->check = 0;
top_iph->saddr = x->props.saddr.xfrm4_addr;
top_iph->daddr = x->id.daddr.xfrm4_addr;
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
ah = (struct ip_auth_hdr*)(top_iph+1);
ah->nexthdr = IPPROTO_IPIP;
} else {
......@@ -232,7 +232,7 @@ void ah4_err(struct sk_buff *skb, u32 info)
skb->h.icmph->code != ICMP_FRAG_NEEDED)
return;
x = xfrm4_state_lookup(iph->daddr, ah->spi, IPPROTO_AH);
x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET);
if (!x)
return;
printk(KERN_DEBUG "pmtu discvovery on SA AH/%08x/%08x\n",
......@@ -338,13 +338,13 @@ static struct inet_protocol ah4_protocol = {
static int __init ah4_init(void)
{
SET_MODULE_OWNER(&ah_type);
if (xfrm_register_type(&ah_type) < 0) {
if (xfrm_register_type(&ah_type, AF_INET) < 0) {
printk(KERN_INFO "ip ah init: can't add xfrm type\n");
return -EAGAIN;
}
if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) {
printk(KERN_INFO "ip ah init: can't add protocol\n");
xfrm_unregister_type(&ah_type);
xfrm_unregister_type(&ah_type, AF_INET);
return -EAGAIN;
}
return 0;
......@@ -354,7 +354,7 @@ static void __exit ah4_fini(void)
{
if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0)
printk(KERN_INFO "ip ah close: can't remove protocol\n");
if (xfrm_unregister_type(&ah_type) < 0)
if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
printk(KERN_INFO "ip ah close: can't remove xfrm type\n");
}
......
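xfrm_register_type() and xfrm_unregister_type() now take the address family, so AH (and ESP below) register into per-family type maps instead of a single IPv4 table. A hedged sketch of the init/exit pairing for a hypothetical IPv4 transform:
static struct xfrm_type demo_type;          /* hypothetical transform descriptor */
static int __init demo_init(void)
{
	if (xfrm_register_type(&demo_type, AF_INET) < 0)
		return -EAGAIN;
	return 0;
}
static void __exit demo_exit(void)
{
	if (xfrm_unregister_type(&demo_type, AF_INET) < 0)
		printk(KERN_INFO "demo: can't remove xfrm type\n");
}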
......@@ -91,8 +91,8 @@ int esp_output(struct sk_buff *skb)
top_iph->ttl = iph->ttl; /* TTL disclosed */
top_iph->protocol = IPPROTO_ESP;
top_iph->check = 0;
top_iph->saddr = x->props.saddr.xfrm4_addr;
top_iph->daddr = x->id.daddr.xfrm4_addr;
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
} else {
esph = (struct ip_esp_hdr*)skb_push(skb, x->props.header_len);
......@@ -276,7 +276,7 @@ void esp4_err(struct sk_buff *skb, u32 info)
skb->h.icmph->code != ICMP_FRAG_NEEDED)
return;
x = xfrm4_state_lookup(iph->daddr, esph->spi, IPPROTO_ESP);
x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
if (!x)
return;
printk(KERN_DEBUG "pmtu discvovery on SA ESP/%08x/%08x\n",
......@@ -405,13 +405,13 @@ static struct inet_protocol esp4_protocol = {
int __init esp4_init(void)
{
SET_MODULE_OWNER(&esp_type);
if (xfrm_register_type(&esp_type) < 0) {
if (xfrm_register_type(&esp_type, AF_INET) < 0) {
printk(KERN_INFO "ip esp init: can't add xfrm type\n");
return -EAGAIN;
}
if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
printk(KERN_INFO "ip esp init: can't add protocol\n");
xfrm_unregister_type(&esp_type);
xfrm_unregister_type(&esp_type, AF_INET);
return -EAGAIN;
}
return 0;
......@@ -421,7 +421,7 @@ static void __exit esp4_fini(void)
{
if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
printk(KERN_INFO "ip esp close: can't remove protocol\n");
if (xfrm_unregister_type(&esp_type) < 0)
if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
}
......
......@@ -60,7 +60,7 @@ int ip_forward(struct sk_buff *skb)
struct rtable *rt; /* Route we use */
struct ip_options * opt = &(IPCB(skb)->opt);
if (!xfrm_policy_check(NULL, XFRM_POLICY_FWD, skb))
if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb))
goto drop;
if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb))
......@@ -82,7 +82,7 @@ int ip_forward(struct sk_buff *skb)
if (iph->ttl <= 1)
goto too_many_hops;
if (!xfrm_route_forward(skb))
if (!xfrm4_route_forward(skb))
goto drop;
iph = skb->nh.iph;
......
......@@ -236,7 +236,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
int ret;
if (!ipprot->no_policy &&
!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb)) {
!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
kfree_skb(skb);
return 0;
}
......@@ -248,7 +248,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
IP_INC_STATS_BH(IpInDelivers);
} else {
if (!raw_sk) {
if (xfrm_policy_check(NULL, XFRM_POLICY_IN, skb)) {
if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
IP_INC_STATS_BH(IpInUnknownProtos);
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_PROT_UNREACH, 0);
......
......@@ -251,7 +251,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb)) {
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
kfree_skb(skb);
return NET_RX_DROP;
}
......
......@@ -2600,13 +2600,6 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
#endif /* CONFIG_PROC_FS */
#endif /* CONFIG_NET_CLS_ROUTE */
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl)
{
int err = 0;
err = __ip_route_output_key((struct rtable**)dst, fl);
return err;
}
int __init ip_rt_init(void)
{
int i, order, goal, rc = 0;
......@@ -2688,7 +2681,6 @@ int __init ip_rt_init(void)
ip_rt_gc_interval;
add_timer(&rt_periodic_timer);
xfrm_dst_lookup_register(xfrm_dst_lookup, AF_INET);
#ifdef CONFIG_PROC_FS
if (rt_cache_proc_init())
goto out_enomem;
......@@ -2698,6 +2690,7 @@ int __init ip_rt_init(void)
#endif
#endif
xfrm_init();
xfrm4_init();
out:
return rc;
out_enomem:
......
......@@ -1798,7 +1798,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
if (sk->state == TCP_TIME_WAIT)
goto do_time_wait;
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb))
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
if (sk_filter(sk, skb, 0))
......@@ -1820,7 +1820,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
return ret;
no_tcp_socket:
if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb))
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
......@@ -1840,7 +1840,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
goto discard_it;
do_time_wait:
if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb))
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_and_relse;
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
......
......@@ -946,7 +946,7 @@ static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
/*
* Charge it to the socket, dropping if the queue is full.
*/
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb)) {
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
kfree_skb(skb);
return -1;
}
......@@ -1077,7 +1077,7 @@ int udp_rcv(struct sk_buff *skb)
return 0;
}
if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb))
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
/* No socket. Drop packet silently, if checksum is wrong */
......
/*
* xfrm4_input.c
*
* Changes:
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific portion
*
*/
#include <net/ip.h>
#include <net/xfrm.h>
static kmem_cache_t *secpath_cachep;
int xfrm4_rcv(struct sk_buff *skb)
{
int err;
u32 spi, seq;
struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
struct xfrm_state *x;
int xfrm_nr = 0;
int decaps = 0;
if ((err = xfrm_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) != 0)
goto drop;
do {
struct iphdr *iph = skb->nh.iph;
if (xfrm_nr == XFRM_MAX_DEPTH)
goto drop;
x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi, iph->protocol, AF_INET);
if (x == NULL)
goto drop;
spin_lock(&x->lock);
if (unlikely(x->km.state != XFRM_STATE_VALID))
goto drop_unlock;
if (x->props.replay_window && xfrm_replay_check(x, seq))
goto drop_unlock;
if (x->type->input(x, skb))
goto drop_unlock;
if (x->props.replay_window)
xfrm_replay_advance(x, seq);
x->curlft.bytes += skb->len;
x->curlft.packets++;
spin_unlock(&x->lock);
xfrm_vec[xfrm_nr++] = x;
iph = skb->nh.iph;
if (x->props.mode) {
if (iph->protocol != IPPROTO_IPIP)
goto drop;
skb->nh.raw = skb->data;
iph = skb->nh.iph;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
decaps = 1;
break;
}
if ((err = xfrm_parse_spi(skb, skb->nh.iph->protocol, &spi, &seq)) < 0)
goto drop;
} while (!err);
/* Allocate new secpath or COW existing one. */
if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
kmem_cache_t *pool = skb->sp ? skb->sp->pool : secpath_cachep;
struct sec_path *sp;
sp = kmem_cache_alloc(pool, SLAB_ATOMIC);
if (!sp)
goto drop;
if (skb->sp) {
memcpy(sp, skb->sp, sizeof(struct sec_path));
secpath_put(skb->sp);
} else {
sp->pool = pool;
sp->len = 0;
}
atomic_set(&sp->refcnt, 1);
skb->sp = sp;
}
if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
goto drop;
memcpy(skb->sp->xvec+skb->sp->len, xfrm_vec, xfrm_nr*sizeof(void*));
skb->sp->len += xfrm_nr;
if (decaps) {
if (!(skb->dev->flags&IFF_LOOPBACK)) {
dst_release(skb->dst);
skb->dst = NULL;
}
netif_rx(skb);
return 0;
} else {
return -skb->nh.iph->protocol;
}
drop_unlock:
spin_unlock(&x->lock);
xfrm_state_put(x);
drop:
while (--xfrm_nr >= 0)
xfrm_state_put(xfrm_vec[xfrm_nr]);
kfree_skb(skb);
return 0;
}
void __init xfrm4_input_init(void)
{
secpath_cachep = kmem_cache_create("secpath4_cache",
sizeof(struct sec_path),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (!secpath_cachep)
panic("IP: failed to allocate secpath4_cache\n");
}
/*
* xfrm4_policy.c
*
* Changes:
* Kazunori MIYAZAWA @USAGI
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific portion
*
*/
#include <linux/config.h>
#include <net/xfrm.h>
#include <net/ip.h>
extern struct dst_ops xfrm4_dst_ops;
extern struct xfrm_policy_afinfo xfrm4_policy_afinfo;
static struct xfrm_type_map xfrm4_type_map = { .lock = RW_LOCK_UNLOCKED };
static int xfrm4_dst_lookup(struct xfrm_dst **dst, struct flowi *fl)
{
return __ip_route_output_key((struct rtable**)dst, fl);
}
/* Check that the bundle accepts the flow and its components are
* still valid.
*/
static int __xfrm4_bundle_ok(struct xfrm_dst *xdst, struct flowi *fl)
{
do {
if (xdst->u.dst.ops != &xfrm4_dst_ops)
return 1;
if (!xfrm_selector_match(&xdst->u.dst.xfrm->sel, fl, AF_INET))
return 0;
if (xdst->u.dst.xfrm->km.state != XFRM_STATE_VALID ||
xdst->u.dst.path->obsolete > 0)
return 0;
xdst = (struct xfrm_dst*)xdst->u.dst.child;
} while (xdst);
return 0;
}
static struct dst_entry *
__xfrm4_find_bundle(struct flowi *fl, struct rtable *rt, struct xfrm_policy *policy)
{
struct dst_entry *dst;
if (!fl->fl4_src)
fl->fl4_src = rt->rt_src;
if (!fl->fl4_dst)
fl->fl4_dst = rt->rt_dst;
read_lock_bh(&policy->lock);
for (dst = policy->bundles; dst; dst = dst->next) {
struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/
xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
xdst->u.rt.fl.fl4_src == fl->fl4_src &&
__xfrm4_bundle_ok(xdst, fl)) {
dst_clone(dst);
break;
}
}
read_unlock_bh(&policy->lock);
return dst;
}
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
* all the metrics... Shortly, bundle a bundle.
*/
static int
__xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
struct flowi *fl, struct dst_entry **dst_p)
{
struct dst_entry *dst, *dst_prev;
struct rtable *rt0 = (struct rtable*)(*dst_p);
struct rtable *rt = rt0;
u32 remote = fl->fl4_dst;
u32 local = fl->fl4_src;
int i;
int err;
int header_len = 0;
int trailer_len = 0;
dst = dst_prev = NULL;
for (i = 0; i < nx; i++) {
struct dst_entry *dst1 = dst_alloc(&xfrm4_dst_ops);
if (unlikely(dst1 == NULL)) {
err = -ENOBUFS;
goto error;
}
dst1->xfrm = xfrm[i];
if (!dst)
dst = dst1;
else {
dst_prev->child = dst1;
dst1->flags |= DST_NOHASH;
dst_clone(dst1);
}
dst_prev = dst1;
if (xfrm[i]->props.mode) {
remote = xfrm[i]->id.daddr.a4;
local = xfrm[i]->props.saddr.a4;
}
header_len += xfrm[i]->props.header_len;
trailer_len += xfrm[i]->props.trailer_len;
}
if (remote != fl->fl4_dst) {
struct flowi fl_tunnel = { .nl_u = { .ip4_u =
{ .daddr = remote,
.saddr = local }
}
};
err = xfrm_dst_lookup((struct xfrm_dst**)&rt, &fl_tunnel, AF_INET);
if (err)
goto error;
} else {
dst_hold(&rt->u.dst);
}
dst_prev->child = &rt->u.dst;
for (dst_prev = dst; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) {
struct xfrm_dst *x = (struct xfrm_dst*)dst_prev;
x->u.rt.fl = *fl;
dst_prev->dev = rt->u.dst.dev;
if (rt->u.dst.dev)
dev_hold(rt->u.dst.dev);
dst_prev->obsolete = -1;
dst_prev->flags |= DST_HOST;
dst_prev->lastuse = jiffies;
dst_prev->header_len = header_len;
dst_prev->trailer_len = trailer_len;
memcpy(&dst_prev->metrics, &rt->u.dst.metrics, sizeof(dst_prev->metrics));
dst_prev->path = &rt->u.dst;
/* Copy neighbour for reachability confirmation */
dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
dst_prev->input = rt->u.dst.input;
dst_prev->output = dst_prev->xfrm->type->output;
if (rt->peer)
atomic_inc(&rt->peer->refcnt);
x->u.rt.peer = rt->peer;
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
x->u.rt.rt_flags = rt0->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
x->u.rt.rt_type = rt->rt_type;
x->u.rt.rt_src = rt0->rt_src;
x->u.rt.rt_dst = rt0->rt_dst;
x->u.rt.rt_gateway = rt->rt_gateway;
x->u.rt.rt_spec_dst = rt0->rt_spec_dst;
header_len -= x->u.dst.xfrm->props.header_len;
trailer_len -= x->u.dst.xfrm->props.trailer_len;
}
*dst_p = dst;
return 0;
error:
if (dst)
dst_free(dst);
return err;
}
static void
_decode_session4(struct sk_buff *skb, struct flowi *fl)
{
struct iphdr *iph = skb->nh.iph;
u8 *xprth = skb->nh.raw + iph->ihl*4;
if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
switch (iph->protocol) {
case IPPROTO_UDP:
case IPPROTO_TCP:
case IPPROTO_SCTP:
if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
u16 *ports = (u16 *)xprth;
fl->uli_u.ports.sport = ports[0];
fl->uli_u.ports.dport = ports[1];
}
break;
case IPPROTO_ESP:
if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
u32 *ehdr = (u32 *)xprth;
fl->uli_u.spi = ehdr[0];
}
break;
case IPPROTO_AH:
if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
u32 *ah_hdr = (u32*)xprth;
fl->uli_u.spi = ah_hdr[1];
}
break;
default:
fl->uli_u.spi = 0;
break;
};
} else {
memset(fl, 0, sizeof(struct flowi));
}
fl->proto = iph->protocol;
fl->fl4_dst = iph->daddr;
fl->fl4_src = iph->saddr;
}
static inline int xfrm4_garbage_collect(void)
{
read_lock(&xfrm4_policy_afinfo.lock);
xfrm4_policy_afinfo.garbage_collect();
read_unlock(&xfrm4_policy_afinfo.lock);
return (atomic_read(&xfrm4_dst_ops.entries) > xfrm4_dst_ops.gc_thresh*2);
}
static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
{
struct dst_entry *path = dst->path;
if (mtu < 68 + dst->header_len)
return;
path->ops->update_pmtu(path, mtu);
}
struct dst_ops xfrm4_dst_ops = {
.family = AF_INET,
.protocol = __constant_htons(ETH_P_IP),
.gc = xfrm4_garbage_collect,
.update_pmtu = xfrm4_update_pmtu,
.gc_thresh = 1024,
.entry_size = sizeof(struct xfrm_dst),
};
struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
.family = AF_INET,
.lock = RW_LOCK_UNLOCKED,
.type_map = &xfrm4_type_map,
.dst_ops = &xfrm4_dst_ops,
.dst_lookup = xfrm4_dst_lookup,
.find_bundle = __xfrm4_find_bundle,
.bundle_create = __xfrm4_bundle_create,
.decode_session = _decode_session4,
};
void __init xfrm4_policy_init(void)
{
xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
}
void __exit xfrm4_policy_fini(void)
{
xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
}
void __init xfrm4_init(void)
{
xfrm4_state_init();
xfrm4_policy_init();
xfrm4_input_init();
}
void __exit xfrm4_fini(void)
{
//xfrm4_input_fini();
xfrm4_policy_fini();
xfrm4_state_fini();
}
/*
* xfrm4_state.c
*
* Changes:
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific portion
*
*/
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
extern struct xfrm_state_afinfo xfrm4_state_afinfo;
static void
__xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
struct xfrm_tmpl *tmpl,
xfrm_address_t *daddr, xfrm_address_t *saddr)
{
x->sel.daddr.a4 = fl->fl4_dst;
x->sel.saddr.a4 = fl->fl4_src;
x->sel.dport = fl->uli_u.ports.dport;
x->sel.dport_mask = ~0;
x->sel.sport = fl->uli_u.ports.sport;
x->sel.sport_mask = ~0;
x->sel.prefixlen_d = 32;
x->sel.prefixlen_s = 32;
x->sel.proto = fl->proto;
x->sel.ifindex = fl->oif;
x->id = tmpl->id;
if (x->id.daddr.a4 == 0)
x->id.daddr.a4 = daddr->a4;
x->props.saddr = tmpl->saddr;
if (x->props.saddr.a4 == 0)
x->props.saddr.a4 = saddr->a4;
x->props.mode = tmpl->mode;
x->props.reqid = tmpl->reqid;
x->props.family = AF_INET;
}
static struct xfrm_state *
__xfrm4_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto)
{
unsigned h = __xfrm4_spi_hash(daddr, spi, proto);
struct xfrm_state *x;
list_for_each_entry(x, xfrm4_state_afinfo.state_byspi+h, byspi) {
if (x->props.family == AF_INET &&
spi == x->id.spi &&
daddr->a4 == x->id.daddr.a4 &&
proto == x->id.proto) {
atomic_inc(&x->refcnt);
return x;
}
}
return NULL;
}
static struct xfrm_state *
__xfrm4_find_acq(u8 mode, u16 reqid, u8 proto,
xfrm_address_t *daddr, xfrm_address_t *saddr,
int create)
{
struct xfrm_state *x, *x0;
unsigned h = __xfrm4_dst_hash(daddr);
x0 = NULL;
list_for_each_entry(x, xfrm4_state_afinfo.state_bydst+h, bydst) {
if (x->props.family == AF_INET &&
daddr->a4 == x->id.daddr.a4 &&
mode == x->props.mode &&
proto == x->id.proto &&
saddr->a4 == x->props.saddr.a4 &&
reqid == x->props.reqid &&
x->km.state == XFRM_STATE_ACQ) {
if (!x0)
x0 = x;
if (x->id.spi)
continue;
x0 = x;
break;
}
}
if (x0) {
atomic_inc(&x0->refcnt);
} else if (create && (x0 = xfrm_state_alloc()) != NULL) {
x0->sel.daddr.a4 = daddr->a4;
x0->sel.saddr.a4 = saddr->a4;
x0->sel.prefixlen_d = 32;
x0->sel.prefixlen_s = 32;
x0->props.saddr.a4 = saddr->a4;
x0->km.state = XFRM_STATE_ACQ;
x0->id.daddr.a4 = daddr->a4;
x0->id.proto = proto;
x0->props.family = AF_INET;
x0->props.mode = mode;
x0->props.reqid = reqid;
x0->props.family = AF_INET;
x0->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
atomic_inc(&x0->refcnt);
mod_timer(&x0->timer, jiffies + XFRM_ACQ_EXPIRES*HZ);
atomic_inc(&x0->refcnt);
list_add_tail(&x0->bydst, xfrm4_state_afinfo.state_bydst+h);
wake_up(&km_waitq);
}
return x0;
}
static struct xfrm_state_afinfo xfrm4_state_afinfo = {
.family = AF_INET,
.lock = RW_LOCK_UNLOCKED,
.init_tempsel = __xfrm4_init_tempsel,
.state_lookup = __xfrm4_state_lookup,
.find_acq = __xfrm4_find_acq,
};
void __init xfrm4_state_init(void)
{
xfrm_state_register_afinfo(&xfrm4_state_afinfo);
}
void __exit xfrm4_state_fini(void)
{
xfrm_state_unregister_afinfo(&xfrm4_state_afinfo);
}
......@@ -219,6 +219,36 @@ static struct xfrm_algo_desc ealg_list[] = {
},
};
static struct xfrm_algo_desc calg_list[] = {
{
.name = "deflate",
.uinfo = {
.comp = {
.threshold = 90,
}
},
.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
.name = "lzs",
.uinfo = {
.comp = {
.threshold = 90,
}
},
.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
.name = "lzjh",
.uinfo = {
.comp = {
.threshold = 50,
}
},
.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
static inline int aalg_entries(void)
{
return sizeof(aalg_list) / sizeof(aalg_list[0]);
......@@ -229,6 +259,12 @@ static inline int ealg_entries(void)
return sizeof(ealg_list) / sizeof(ealg_list[0]);
}
static inline int calg_entries(void)
{
return sizeof(calg_list) / sizeof(calg_list[0]);
}
/* Todo: generic iterators */
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
int i;
......@@ -259,6 +295,21 @@ struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
return NULL;
}
struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
int i;
for (i = 0; i < calg_entries(); i++) {
if (calg_list[i].desc.sadb_alg_id == alg_id) {
if (calg_list[i].available)
return &calg_list[i];
else
break;
}
}
return NULL;
}
struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name)
{
int i;
......@@ -295,6 +346,24 @@ struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name)
return NULL;
}
struct xfrm_algo_desc *xfrm_calg_get_byname(char *name)
{
int i;
if (!name)
return NULL;
for (i=0; i < calg_entries(); i++) {
if (strcmp(name, calg_list[i].name) == 0) {
if (calg_list[i].available)
return &calg_list[i];
else
break;
}
}
return NULL;
}
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
if (idx >= aalg_entries())
......@@ -311,6 +380,14 @@ struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
return &ealg_list[idx];
}
struct xfrm_algo_desc *xfrm_calg_get_byidx(unsigned int idx)
{
if (idx >= calg_entries())
return NULL;
return &calg_list[idx];
}
/*
* Probe for the availability of crypto algorithms, and set the available
* flag for any algorithms found on the system. This is typically called by
......@@ -334,6 +411,12 @@ void xfrm_probe_algs(void)
if (ealg_list[i].available != status)
ealg_list[i].available = status;
}
for (i = 0; i < calg_entries(); i++) {
status = crypto_alg_available(calg_list[i].name, 0);
if (calg_list[i].available != status)
calg_list[i].available = status;
}
#endif
}
......
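The new calg_list[] wires the PF_KEY compression algorithm IDs (SADB_X_CALG_*) to names the crypto layer can probe, alongside the existing authentication and encryption lists. A hedged caller-side sketch, assuming xfrm_probe_algs() has already run:
struct xfrm_algo_desc *calg;
calg = xfrm_calg_get_byname("deflate");             /* NULL if not available */
if (!calg)
	calg = xfrm_calg_get_byid(SADB_X_CALG_DEFLATE);
if (calg)
	printk(KERN_DEBUG "ipcomp threshold: %d\n",
	       calg->uinfo.comp.threshold);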
......@@ -2,11 +2,11 @@
*
* Copyright (C) 2002 David S. Miller (davem@redhat.com)
*
* Changes
*
* Mitsuru KANDA @USAGI : IPv6 Support
* Kazunori MIYAZAWA @USAGI :
* Kunihiro Ishiguro :
* Changes:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro
* IPv6 support
*
*/
......@@ -24,9 +24,6 @@
#include <linux/ipsec.h>
#include <linux/init.h>
#include <linux/security.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/in6.h>
#endif
#include <net/sock.h>
#include <net/xfrm.h>
......@@ -191,19 +188,7 @@ static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
goto error;
err = -ENOENT;
switch (x->props.family) {
case AF_INET:
x->type = xfrm_get_type(x->id.proto);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
x->type = xfrm6_get_type(x->id.proto);
break;
#endif
default:
x->type = NULL;
break;
}
x->type = xfrm_get_type(x->id.proto, x->props.family);
if (x->type == NULL)
goto error;
......@@ -238,21 +223,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
if (!x)
return err;
switch (x->props.family) {
case AF_INET:
x1 = xfrm4_state_lookup(x->props.saddr.xfrm4_addr,
x->id.spi, x->id.proto);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
x1 = xfrm6_state_lookup((struct in6_addr*)x->props.saddr.a6,
x->id.spi, x->id.proto);
break;
#endif
default:
x1 = NULL;
break;
}
x1 = xfrm_state_lookup(&x->props.saddr, x->id.spi, x->id.proto, x->props.family);
if (x1) {
xfrm_state_put(x);
xfrm_state_put(x1);
......@@ -269,19 +240,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
struct xfrm_state *x;
struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
switch (p->family) {
case AF_INET:
x = xfrm4_state_lookup(p->saddr.xfrm4_addr, p->spi, p->proto);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
x = xfrm6_state_lookup((struct in6_addr*)p->saddr.a6, p->spi, p->proto);
break;
#endif
default:
x = NULL;
break;
}
x = xfrm_state_lookup(&p->saddr, p->spi, p->proto, p->family);
if (x == NULL)
return -ESRCH;
......@@ -399,19 +358,7 @@ static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
struct sk_buff *resp_skb;
int err;
switch (p->family) {
case AF_INET:
x = xfrm4_state_lookup(p->saddr.xfrm4_addr, p->spi, p->proto);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
x = xfrm6_state_lookup((struct in6_addr*)p->saddr.a6, p->spi, p->proto);
break;
#endif
default:
x = NULL;
break;
}
x = xfrm_state_lookup(&p->saddr, p->spi, p->proto, p->family);
err = -ESRCH;
if (x == NULL)
goto out_noput;
......@@ -462,23 +409,10 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, void **
err = verify_userspi_info(p);
if (err)
goto out_noput;
switch (p->info.family) {
case AF_INET:
x = xfrm_find_acq(p->info.mode, p->info.reqid, p->info.id.proto,
p->info.sel.daddr.xfrm4_addr,
p->info.sel.saddr.xfrm4_addr, 1);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
x = xfrm6_find_acq(p->info.mode, p->info.reqid, p->info.id.proto,
(struct in6_addr*)p->info.sel.daddr.a6,
(struct in6_addr*)p->info.sel.saddr.a6, 1);
break;
#endif
default:
x = NULL;
break;
}
x = xfrm_find_acq(p->info.mode, p->info.reqid, p->info.id.proto,
&p->info.sel.daddr,
&p->info.sel.saddr, 1,
p->info.family);
err = -ENOENT;
if (x == NULL)
goto out_noput;
......@@ -1086,10 +1020,26 @@ struct xfrm_policy *xfrm_compile_policy(u16 family, int opt,
struct xfrm_policy *xp;
int nr;
if (opt != IP_XFRM_POLICY) {
*dir = -EOPNOTSUPP;
switch (family) {
case AF_INET:
if (opt != IP_XFRM_POLICY) {
*dir = -EOPNOTSUPP;
return NULL;
}
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
if (opt != IPV6_XFRM_POLICY) {
*dir = -EOPNOTSUPP;
return NULL;
}
break;
#endif
default:
*dir = -EINVAL;
return NULL;
}
*dir = -EINVAL;
if (len < sizeof(*p) ||
......
......@@ -8,7 +8,8 @@ ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o sit.o \
route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \
protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
ip6_flowlabel.o ipv6_syms.o
ip6_flowlabel.o ipv6_syms.o \
xfrm6_policy.o xfrm6_state.o xfrm6_input.o
obj-$(CONFIG_INET6_AH) += ah6.o
obj-$(CONFIG_INET6_ESP) += esp6.o
......
......@@ -228,7 +228,7 @@ void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
type != ICMPV6_PKT_TOOBIG)
return;
x = xfrm6_state_lookup(&iph->daddr, ah->spi, IPPROTO_AH);
x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
if (!x)
return;
......@@ -336,14 +336,14 @@ int __init ah6_init(void)
{
SET_MODULE_OWNER(&ah6_type);
if (xfrm6_register_type(&ah6_type) < 0) {
if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
printk(KERN_INFO "ipv6 ah init: can't add xfrm type\n");
return -EAGAIN;
}
if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) {
printk(KERN_INFO "ipv6 ah init: can't add protocol\n");
xfrm6_unregister_type(&ah6_type);
xfrm_unregister_type(&ah6_type, AF_INET6);
return -EAGAIN;
}
......@@ -355,7 +355,7 @@ static void __exit ah6_fini(void)
if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0)
printk(KERN_INFO "ipv6 ah close: can't remove protocol\n");
if (xfrm6_unregister_type(&ah6_type) < 0)
if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0)
printk(KERN_INFO "ipv6 ah close: can't remove xfrm type\n");
}
......
......@@ -377,7 +377,7 @@ void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
type != ICMPV6_PKT_TOOBIG)
return;
x = xfrm6_state_lookup(&iph->daddr, esph->spi, IPPROTO_ESP);
x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6);
if (!x)
return;
printk(KERN_DEBUG "pmtu discvovery on SA ESP/%08x/"
......@@ -504,13 +504,13 @@ static struct inet6_protocol esp6_protocol = {
int __init esp6_init(void)
{
SET_MODULE_OWNER(&esp6_type);
if (xfrm6_register_type(&esp6_type) < 0) {
if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
printk(KERN_INFO "ipv6 esp init: can't add xfrm type\n");
return -EAGAIN;
}
if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
printk(KERN_INFO "ipv6 esp init: can't add protocol\n");
xfrm6_unregister_type(&esp6_type);
xfrm_unregister_type(&esp6_type, AF_INET6);
return -EAGAIN;
}
......@@ -521,7 +521,7 @@ static void __exit esp6_fini(void)
{
if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
printk(KERN_INFO "ipv6 esp close: can't remove protocol\n");
if (xfrm6_unregister_type(&esp6_type) < 0)
if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
printk(KERN_INFO "ipv6 esp close: can't remove xfrm type\n");
}
......
......@@ -643,7 +643,8 @@ int ip6_build_xmit(struct sock *sk, inet_getfrag_t getfrag, const void *data,
if (flags&MSG_PROBE)
goto out;
/* alloc skb with mtu as we do in the IPv4 stack for IPsec */
skb = sock_alloc_send_skb(sk, mtu, flags & MSG_DONTWAIT, &err);
skb = sock_alloc_send_skb(sk, mtu + LL_RESERVED_SPACE(dev),
flags & MSG_DONTWAIT, &err);
if (skb == NULL) {
IP6_INC_STATS(Ip6OutDiscards);
......
......@@ -47,6 +47,7 @@
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/xfrm.h>
#include <asm/uaccess.h>
......@@ -404,6 +405,10 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char *optval,
case IPV6_FLOWLABEL_MGR:
retv = ipv6_flowlabel_opt(sk, optval, optlen);
break;
case IPV6_IPSEC_POLICY:
case IPV6_XFRM_POLICY:
retv = xfrm_user_policy(sk, optname, optval, optlen);
break;
#ifdef CONFIG_NETFILTER
default:
......
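ipv6_setsockopt() now routes IPV6_IPSEC_POLICY and the new IPV6_XFRM_POLICY option (value 35, added in the in6.h hunk) to xfrm_user_policy(), mirroring IPv4's IP_XFRM_POLICY. A hedged userspace sketch; buf and len are placeholders for a policy message that xfrm_user_policy() understands, and building that blob is out of scope here:
/* fd is an AF_INET6 socket; buf/len hold the packed policy request. */
if (setsockopt(fd, IPPROTO_IPV6, IPV6_XFRM_POLICY, buf, len) < 0)
	perror("IPV6_XFRM_POLICY");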
......@@ -5,6 +5,7 @@
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/xfrm.h>
EXPORT_SYMBOL(ipv6_addr_type);
EXPORT_SYMBOL(icmpv6_send);
......@@ -31,3 +32,5 @@ EXPORT_SYMBOL(ipv6_get_saddr);
EXPORT_SYMBOL(ipv6_chk_addr);
EXPORT_SYMBOL(in6addr_any);
EXPORT_SYMBOL(in6addr_loopback);
EXPORT_SYMBOL(xfrm6_rcv);
EXPORT_SYMBOL(xfrm6_clear_mutable_options);
......@@ -1867,15 +1867,6 @@ ctl_table ipv6_route_table[] = {
#endif
int xfrm6_dst_lookup(struct xfrm_dst **dst, struct flowi *fl)
{
int err = 0;
*dst = (struct xfrm_dst*)ip6_route_output(NULL, fl);
if (!*dst)
err = -ENETUNREACH;
return err;
}
void __init ip6_route_init(void)
{
ip6_dst_ops.kmem_cachep = kmem_cache_create("ip6_dst_cache",
......@@ -1883,11 +1874,11 @@ void __init ip6_route_init(void)
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
fib6_init();
xfrm_dst_lookup_register(xfrm6_dst_lookup, AF_INET6);
#ifdef CONFIG_PROC_FS
proc_net_create("ipv6_route", 0, rt6_proc_info);
proc_net_create("rt6_stats", 0, rt6_proc_stats);
#endif
xfrm6_init();
}
#ifdef MODULE
......@@ -1897,7 +1888,7 @@ void ip6_route_cleanup(void)
proc_net_remove("ipv6_route");
proc_net_remove("rt6_stats");
#endif
xfrm_dst_lookup_unregister(AF_INET6);
xfrm6_fini();
rt6_ifdown(NULL);
fib6_gc_cleanup();
}
......
......@@ -967,7 +967,7 @@ static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
struct ipv6_pinfo *np = inet6_sk(sk);
if (skb->ip_summed == CHECKSUM_HW) {
th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
skb->csum = offsetof(struct tcphdr, check);
} else {
th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
......@@ -1642,7 +1642,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
goto discard_and_relse;
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_it;
goto discard_and_relse;
skb->dev = NULL;
......
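Two small IPv6 TCP fixes above: the CHECKSUM_HW transmit path must store the complement of the pseudo-header sum so the NIC can add the payload sum and fold once, and a failed xfrm policy check after socket lookup must take discard_and_relse so the socket reference is released. The checksum convention, restated as a short sketch using the same variables as the hunk:
/* Seed th->check with the complemented pseudo-header sum and record
 * where the device should write the final folded checksum. */
th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
skb->csum = offsetof(struct tcphdr, check);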
/*
* xfrm6_input.c: based on net/ipv4/xfrm4_input.c
*
* Authors:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro
* YOSHIFUJI Hideaki @USAGI
* IPv6 support
*/
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/xfrm.h>
static kmem_cache_t *secpath_cachep;
static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
{
u8 *opt = (u8 *)opthdr;
int len = ipv6_optlen(opthdr);
int off = 0;
int optlen = 0;
off += 2;
len -= 2;
while (len > 0) {
switch (opt[off]) {
case IPV6_TLV_PAD0:
optlen = 1;
break;
default:
if (len < 2)
goto bad;
optlen = opt[off+1]+2;
if (len < optlen)
goto bad;
if (opt[off] & 0x20)
memset(&opt[off+2], 0, opt[off+1]);
break;
}
off += optlen;
len -= optlen;
}
if (len == 0)
return 1;
bad:
return 0;
}
int xfrm6_clear_mutable_options(struct sk_buff *skb, u16 *nh_offset, int dir)
{
u16 offset = sizeof(struct ipv6hdr);
struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
unsigned int packet_len = skb->tail - skb->nh.raw;
u8 nexthdr = skb->nh.ipv6h->nexthdr;
u8 nextnexthdr = 0;
*nh_offset = ((unsigned char *)&skb->nh.ipv6h->nexthdr) - skb->nh.raw;
while (offset + 1 <= packet_len) {
switch (nexthdr) {
case NEXTHDR_HOP:
*nh_offset = offset;
offset += ipv6_optlen(exthdr);
if (!zero_out_mutable_opts(exthdr)) {
if (net_ratelimit())
printk(KERN_WARNING "overrun hopopts\n");
return 0;
}
nexthdr = exthdr->nexthdr;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
case NEXTHDR_ROUTING:
*nh_offset = offset;
offset += ipv6_optlen(exthdr);
((struct ipv6_rt_hdr*)exthdr)->segments_left = 0;
nexthdr = exthdr->nexthdr;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
case NEXTHDR_DEST:
*nh_offset = offset;
offset += ipv6_optlen(exthdr);
if (!zero_out_mutable_opts(exthdr)) {
if (net_ratelimit())
printk(KERN_WARNING "overrun destopt\n");
return 0;
}
nexthdr = exthdr->nexthdr;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
case NEXTHDR_AUTH:
if (dir == XFRM_POLICY_OUT) {
memset(((struct ipv6_auth_hdr*)exthdr)->auth_data, 0,
(((struct ipv6_auth_hdr*)exthdr)->hdrlen - 1) << 2);
}
if (exthdr->nexthdr == NEXTHDR_DEST) {
offset += (((struct ipv6_auth_hdr*)exthdr)->hdrlen + 2) << 2;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
nextnexthdr = exthdr->nexthdr;
if (!zero_out_mutable_opts(exthdr)) {
if (net_ratelimit())
printk(KERN_WARNING "overrun destopt\n");
return 0;
}
}
return nexthdr;
default :
return nexthdr;
}
}
return nexthdr;
}
int xfrm6_rcv(struct sk_buff *skb)
{
int err;
u32 spi, seq;
struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
struct xfrm_state *x;
int xfrm_nr = 0;
int decaps = 0;
struct ipv6hdr *hdr = skb->nh.ipv6h;
unsigned char *tmp_hdr = NULL;
int hdr_len = 0;
u16 nh_offset = 0;
u8 nexthdr = 0;
if (hdr->nexthdr == IPPROTO_AH || hdr->nexthdr == IPPROTO_ESP) {
nh_offset = ((unsigned char*)&skb->nh.ipv6h->nexthdr) - skb->nh.raw;
hdr_len = sizeof(struct ipv6hdr);
} else {
hdr_len = skb->h.raw - skb->nh.raw;
}
tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
if (!tmp_hdr)
goto drop;
memcpy(tmp_hdr, skb->nh.raw, hdr_len);
nexthdr = xfrm6_clear_mutable_options(skb, &nh_offset, XFRM_POLICY_IN);
hdr->priority = 0;
hdr->flow_lbl[0] = 0;
hdr->flow_lbl[1] = 0;
hdr->flow_lbl[2] = 0;
hdr->hop_limit = 0;
if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
goto drop;
do {
struct ipv6hdr *iph = skb->nh.ipv6h;
if (xfrm_nr == XFRM_MAX_DEPTH)
goto drop;
x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, spi, nexthdr, AF_INET6);
if (x == NULL)
goto drop;
spin_lock(&x->lock);
if (unlikely(x->km.state != XFRM_STATE_VALID))
goto drop_unlock;
if (x->props.replay_window && xfrm_replay_check(x, seq))
goto drop_unlock;
nexthdr = x->type->input(x, skb);
if (nexthdr <= 0)
goto drop_unlock;
if (x->props.replay_window)
xfrm_replay_advance(x, seq);
x->curlft.bytes += skb->len;
x->curlft.packets++;
spin_unlock(&x->lock);
xfrm_vec[xfrm_nr++] = x;
iph = skb->nh.ipv6h; /* ??? */
if (nexthdr == NEXTHDR_DEST) {
if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
!pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
err = -EINVAL;
goto drop;
}
nexthdr = skb->h.raw[0];
nh_offset = skb->h.raw - skb->nh.raw;
skb_pull(skb, (skb->h.raw[1]+1)<<3);
skb->h.raw = skb->data;
}
if (x->props.mode) { /* XXX */
if (iph->nexthdr != IPPROTO_IPV6)
goto drop;
skb->nh.raw = skb->data;
iph = skb->nh.ipv6h;
decaps = 1;
break;
}
if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) < 0)
goto drop;
} while (!err);
memcpy(skb->nh.raw, tmp_hdr, hdr_len);
skb->nh.raw[nh_offset] = nexthdr;
skb->nh.ipv6h->payload_len = htons(hdr_len + skb->len - sizeof(struct ipv6hdr));
/* Allocate new secpath or COW existing one. */
if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
kmem_cache_t *pool = skb->sp ? skb->sp->pool : secpath_cachep;
struct sec_path *sp;
sp = kmem_cache_alloc(pool, SLAB_ATOMIC);
if (!sp)
goto drop;
if (skb->sp) {
memcpy(sp, skb->sp, sizeof(struct sec_path));
secpath_put(skb->sp);
} else {
sp->pool = pool;
sp->len = 0;
}
atomic_set(&sp->refcnt, 1);
skb->sp = sp;
}
if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
goto drop;
memcpy(skb->sp->xvec+skb->sp->len, xfrm_vec, xfrm_nr*sizeof(void*));
skb->sp->len += xfrm_nr;
if (decaps) {
if (!(skb->dev->flags&IFF_LOOPBACK)) {
dst_release(skb->dst);
skb->dst = NULL;
}
netif_rx(skb);
return 0;
} else {
return -nexthdr;
}
drop_unlock:
spin_unlock(&x->lock);
xfrm_state_put(x);
drop:
if (tmp_hdr) kfree(tmp_hdr);
while (--xfrm_nr >= 0)
xfrm_state_put(xfrm_vec[xfrm_nr]);
kfree_skb(skb);
return 0;
}
void __init xfrm6_input_init(void)
{
secpath_cachep = kmem_cache_create("secpath6_cache",
sizeof(struct sec_path),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (!secpath_cachep)
panic("IPv6: failed to allocate secpath6_cache\n");
}
/*
* xfrm6_policy.c: based on xfrm4_policy.c
*
* Authors:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro
* IPv6 support
* YOSHIFUJI Hideaki
* Split up af-specific portion
*
*/
#include <linux/config.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
extern struct dst_ops xfrm6_dst_ops;
extern struct xfrm_policy_afinfo xfrm6_policy_afinfo;
static struct xfrm_type_map xfrm6_type_map = { .lock = RW_LOCK_UNLOCKED };
int xfrm6_dst_lookup(struct xfrm_dst **dst, struct flowi *fl)
{
int err = 0;
*dst = (struct xfrm_dst*)ip6_route_output(NULL, fl);
if (!*dst)
err = -ENETUNREACH;
return err;
}
/* Check that the bundle accepts the flow and its components are
* still valid.
*/
static int __xfrm6_bundle_ok(struct xfrm_dst *xdst, struct flowi *fl)
{
do {
if (xdst->u.dst.ops != &xfrm6_dst_ops)
return 1;
if (!xfrm_selector_match(&xdst->u.dst.xfrm->sel, fl, AF_INET6))
return 0;
if (xdst->u.dst.xfrm->km.state != XFRM_STATE_VALID ||
xdst->u.dst.path->obsolete > 0)
return 0;
xdst = (struct xfrm_dst*)xdst->u.dst.child;
} while (xdst);
return 0;
}
static struct dst_entry *
__xfrm6_find_bundle(struct flowi *fl, struct rtable *rt, struct xfrm_policy *policy)
{
struct dst_entry *dst;
/* Still not clear if we should set fl->fl6_{src,dst}... */
read_lock_bh(&policy->lock);
for (dst = policy->bundles; dst; dst = dst->next) {
struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
if (!ipv6_addr_cmp(&xdst->u.rt6.rt6i_dst.addr, fl->fl6_dst) &&
!ipv6_addr_cmp(&xdst->u.rt6.rt6i_src.addr, fl->fl6_src) &&
__xfrm6_bundle_ok(xdst, fl)) {
dst_clone(dst);
break;
}
}
read_unlock_bh(&policy->lock);
return dst;
}
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
* all the metrics... Shortly, bundle a bundle.
*/
static int
__xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
struct flowi *fl, struct dst_entry **dst_p)
{
struct dst_entry *dst, *dst_prev;
struct rt6_info *rt0 = (struct rt6_info*)(*dst_p);
struct rt6_info *rt = rt0;
struct in6_addr *remote = fl->fl6_dst;
struct in6_addr *local = fl->fl6_src;
int i;
int err = 0;
int header_len = 0;
int trailer_len = 0;
dst = dst_prev = NULL;
for (i = 0; i < nx; i++) {
struct dst_entry *dst1 = dst_alloc(&xfrm6_dst_ops);
if (unlikely(dst1 == NULL)) {
err = -ENOBUFS;
goto error;
}
dst1->xfrm = xfrm[i];
if (!dst)
dst = dst1;
else {
dst_prev->child = dst1;
dst1->flags |= DST_NOHASH;
dst_clone(dst1);
}
dst_prev = dst1;
if (xfrm[i]->props.mode) {
remote = (struct in6_addr*)&xfrm[i]->id.daddr;
local = (struct in6_addr*)&xfrm[i]->props.saddr;
}
header_len += xfrm[i]->props.header_len;
trailer_len += xfrm[i]->props.trailer_len;
}
if (ipv6_addr_cmp(remote, fl->fl6_dst)) {
struct flowi fl_tunnel = { .nl_u = { .ip6_u =
{ .daddr = remote,
.saddr = local }
}
};
err = xfrm_dst_lookup((struct xfrm_dst**)&rt, &fl_tunnel, AF_INET6);
if (err)
goto error;
} else {
dst_hold(&rt->u.dst);
}
dst_prev->child = &rt->u.dst;
for (dst_prev = dst; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) {
struct xfrm_dst *x = (struct xfrm_dst*)dst_prev;
x->u.rt.fl = *fl;
dst_prev->dev = rt->u.dst.dev;
if (rt->u.dst.dev)
dev_hold(rt->u.dst.dev);
dst_prev->obsolete = -1;
dst_prev->flags |= DST_HOST;
dst_prev->lastuse = jiffies;
dst_prev->header_len = header_len;
dst_prev->trailer_len = trailer_len;
memcpy(&dst_prev->metrics, &rt->u.dst.metrics, sizeof(dst_prev->metrics));
dst_prev->path = &rt->u.dst;
/* Copy neighbour for reachability confirmation */
dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
dst_prev->input = rt->u.dst.input;
dst_prev->output = dst_prev->xfrm->type->output;
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
x->u.rt6.rt6i_flags = rt0->rt6i_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
x->u.rt6.rt6i_metric = rt0->rt6i_metric;
x->u.rt6.rt6i_node = rt0->rt6i_node;
x->u.rt6.rt6i_hoplimit = rt0->rt6i_hoplimit;
x->u.rt6.rt6i_gateway = rt0->rt6i_gateway;
memcpy(&x->u.rt6.rt6i_gateway, &rt0->rt6i_gateway, sizeof(x->u.rt6.rt6i_gateway));
header_len -= x->u.dst.xfrm->props.header_len;
trailer_len -= x->u.dst.xfrm->props.trailer_len;
}
*dst_p = dst;
return 0;
error:
if (dst)
dst_free(dst);
return err;
}
static inline void
_decode_session6(struct sk_buff *skb, struct flowi *fl)
{
u16 offset = sizeof(struct ipv6hdr);
struct ipv6hdr *hdr = skb->nh.ipv6h;
struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
u8 nexthdr = skb->nh.ipv6h->nexthdr;
fl->fl6_dst = &hdr->daddr;
fl->fl6_src = &hdr->saddr;
while (pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data)) {
switch (nexthdr) {
case NEXTHDR_ROUTING:
case NEXTHDR_HOP:
case NEXTHDR_DEST:
offset += ipv6_optlen(exthdr);
nexthdr = exthdr->nexthdr;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
case IPPROTO_UDP:
case IPPROTO_TCP:
case IPPROTO_SCTP:
if (pskb_may_pull(skb, skb->nh.raw + offset + 4 - skb->data)) {
u16 *ports = (u16 *)exthdr;
fl->uli_u.ports.sport = ports[0];
fl->uli_u.ports.dport = ports[1];
}
return;
/* XXX Why are there these headers? */
case IPPROTO_AH:
case IPPROTO_ESP:
default:
fl->uli_u.spi = 0;
return;
};
}
}
static inline int xfrm6_garbage_collect(void)
{
read_lock(&xfrm6_policy_afinfo.lock);
xfrm6_policy_afinfo.garbage_collect();
read_unlock(&xfrm6_policy_afinfo.lock);
return (atomic_read(&xfrm6_dst_ops.entries) > xfrm6_dst_ops.gc_thresh*2);
}
static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
{
struct dst_entry *path = dst->path;
if (mtu >= 1280 && mtu < dst_pmtu(dst))
return;
path->ops->update_pmtu(path, mtu);
}
struct dst_ops xfrm6_dst_ops = {
.family = AF_INET6,
.protocol = __constant_htons(ETH_P_IPV6),
.gc = xfrm6_garbage_collect,
.update_pmtu = xfrm6_update_pmtu,
.gc_thresh = 1024,
.entry_size = sizeof(struct xfrm_dst),
};
struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
.family = AF_INET6,
.lock = RW_LOCK_UNLOCKED,
.type_map = &xfrm6_type_map,
.dst_ops = &xfrm6_dst_ops,
.dst_lookup = xfrm6_dst_lookup,
.find_bundle = __xfrm6_find_bundle,
.bundle_create = __xfrm6_bundle_create,
.decode_session = _decode_session6,
};
void __init xfrm6_policy_init(void)
{
xfrm_policy_register_afinfo(&xfrm6_policy_afinfo);
}
void __exit xfrm6_policy_fini(void)
{
xfrm_policy_unregister_afinfo(&xfrm6_policy_afinfo);
}
void __init xfrm6_init(void)
{
xfrm6_policy_init();
xfrm6_state_init();
xfrm6_input_init();
}
void __exit xfrm6_fini(void)
{
//xfrm6_input_fini();
xfrm6_policy_fini();
xfrm6_state_fini();
}
......@@ -110,6 +110,7 @@ int sctp_rcv(struct sk_buff *skb)
struct sctphdr *sh;
union sctp_addr src;
union sctp_addr dest;
int family;
struct sctp_af *af;
int ret = 0;
......@@ -129,7 +130,8 @@ int sctp_rcv(struct sk_buff *skb)
skb_pull(skb, sizeof(struct sctphdr));
af = sctp_get_af_specific(ipver2af(skb->nh.iph->version));
family = ipver2af(skb->nh.iph->version);
af = sctp_get_af_specific(family);
if (unlikely(!af))
goto bad_packet;
......@@ -173,7 +175,7 @@ int sctp_rcv(struct sk_buff *skb)
rcvr = asoc ? &asoc->base : &ep->base;
sk = rcvr->sk;
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb))
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
goto discard_release;
ret = sk_filter(sk, skb, 1);
......