Commit 5792771d authored by Kazunori Miyazawa, committed by David S. Miller

[IPSEC]: Add full ipv6 support.

Credits also to Mitsuru Kanda <kanda@karaba.org>,
YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>,
and Kunihiro Ishiguro.
parent 86e7191b
......@@ -74,6 +74,21 @@ struct rt0_hdr {
#define rt0_type rt_hdr.type;
};
struct ipv6_auth_hdr {
__u8 nexthdr;
__u8 hdrlen; /* This one is measured in 32 bit units! */
__u16 reserved;
__u32 spi;
__u32 seq_no; /* Sequence number */
__u8 auth_data[4]; /* Length variable but >=4. Mind the 64 bit alignment! */
};
struct ipv6_esp_hdr {
__u32 spi;
__u32 seq_no; /* Sequence number */
__u8 enc_data[8]; /* Length variable but >=8. Mind the 64 bit alignment! */
};
/*
* IPv6 fixed header
*
......
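The hdrlen field in struct ipv6_auth_hdr above is measured in 32-bit words and excludes the first two words, which is why later hunks in this commit build the on-the-wire value as (XFRM_ALIGN8(ahp->icv_trunc_len + AH_HLEN_NOICV) >> 2) - 2 and recover the byte length as (ah->hdrlen + 2) << 2. Below is a minimal userspace sketch of that arithmetic; the 12-byte truncated ICV is an assumed HMAC-SHA1-96 value used only for illustration, not something taken from this diff:

```c
#include <stdio.h>

#define XFRM_ALIGN8(len)  (((len) + 7) & ~7)   /* round up to an 8-byte multiple */
#define AH_HLEN_NOICV     12                   /* fixed AH header, no ICV        */

int main(void)
{
	int icv_trunc_len = 12;  /* assumed: HMAC-SHA1-96 truncated ICV = 96 bits */

	/* total AH header length in bytes, padded to a 64-bit boundary */
	int ah_bytes = XFRM_ALIGN8(icv_trunc_len + AH_HLEN_NOICV);

	/* on-the-wire hdrlen: length in 32-bit words, minus 2 */
	int hdrlen = (ah_bytes >> 2) - 2;

	printf("ah_bytes=%d hdrlen=%d roundtrip=%d\n",
	       ah_bytes, hdrlen, (hdrlen + 2) << 2);
	/* prints: ah_bytes=24 hdrlen=4 roundtrip=24 */
	return 0;
}
```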
......@@ -247,7 +247,10 @@ extern void dst_init(void);
struct flowi;
extern int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
struct sock *sk, int flags);
extern int xfrm6_lookup(struct dst_entry **dst_p, struct flowi *fl,
struct sock *sk, int flags);
extern void xfrm_init(void);
extern void xfrm6_init(void);
#endif
......
......@@ -57,6 +57,8 @@ extern struct rt6_info *rt6_lookup(struct in6_addr *daddr,
struct in6_addr *saddr,
int oif, int flags);
extern struct rt6_info *ndisc_get_dummy_rt(void);
/*
* support functions for ND
*
......
......@@ -12,6 +12,7 @@
#include <net/dst.h>
#include <net/route.h>
#include <net/ip6_fib.h>
#define XFRM_ALIGN8(len) (((len) + 7) & ~7)
......@@ -282,6 +283,7 @@ struct xfrm_dst
struct xfrm_dst *next;
struct dst_entry dst;
struct rtable rt;
struct rt6_info rt6;
} u;
};
......@@ -308,26 +310,42 @@ secpath_put(struct sec_path *sp)
if (sp && atomic_dec_and_test(&sp->refcnt))
__secpath_destroy(sp);
}
extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb);
extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
if (sk && sk->policy[XFRM_POLICY_IN])
return __xfrm_policy_check(sk, dir, skb);
return __xfrm_policy_check(sk, dir, skb, AF_INET);
return !xfrm_policy_list[dir] ||
(skb->dst->flags & DST_NOPOLICY) ||
__xfrm_policy_check(sk, dir, skb);
__xfrm_policy_check(sk, dir, skb, AF_INET);
}
extern int __xfrm_route_forward(struct sk_buff *skb);
static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
if (sk && sk->policy[XFRM_POLICY_IN])
return __xfrm_policy_check(sk, dir, skb, AF_INET6);
return !xfrm_policy_list[dir] ||
(skb->dst->flags & DST_NOPOLICY) ||
__xfrm_policy_check(sk, dir, skb, AF_INET6);
}
extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
static inline int xfrm_route_forward(struct sk_buff *skb)
{
return !xfrm_policy_list[XFRM_POLICY_OUT] ||
(skb->dst->flags & DST_NOXFRM) ||
__xfrm_route_forward(skb);
__xfrm_route_forward(skb, AF_INET);
}
static inline int xfrm6_route_forward(struct sk_buff *skb)
{
return !xfrm_policy_list[XFRM_POLICY_OUT] ||
(skb->dst->flags & DST_NOXFRM) ||
__xfrm_route_forward(skb, AF_INET6);
}
extern int __xfrm_sk_clone_policy(struct sock *sk);
......@@ -380,12 +398,16 @@ extern void xfrm_state_init(void);
extern void xfrm_input_init(void);
extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *);
extern struct xfrm_state *xfrm_state_alloc(void);
extern struct xfrm_state *xfrm_state_find(u32 daddr, u32 saddr, struct flowi *fl, struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err);
extern struct xfrm_state *xfrm4_state_find(u32 daddr, u32 saddr, struct flowi *fl, struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err);
extern struct xfrm_state *xfrm6_state_find(struct in6_addr *daddr, struct in6_addr *saddr,
struct flowi *fl, struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err);
extern int xfrm_state_check_expire(struct xfrm_state *x);
extern void xfrm_state_insert(struct xfrm_state *x);
extern int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb);
extern struct xfrm_state *xfrm_state_lookup(u32 daddr, u32 spi, u8 proto);
extern struct xfrm_state *xfrm4_state_lookup(u32 daddr, u32 spi, u8 proto);
extern struct xfrm_state *xfrm6_state_lookup(struct in6_addr *daddr, u32 spi, u8 proto);
extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
extern void xfrm_state_delete(struct xfrm_state *x);
extern void xfrm_state_flush(u8 proto);
......@@ -393,17 +415,21 @@ extern int xfrm_replay_check(struct xfrm_state *x, u32 seq);
extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
extern int xfrm_check_selectors(struct xfrm_state **x, int n, struct flowi *fl);
extern int xfrm4_rcv(struct sk_buff *skb);
extern int xfrm6_rcv(struct sk_buff *skb);
extern int xfrm6_clear_mutable_options(struct sk_buff *skb, u16 *nh_offset, int dir);
extern int xfrm_user_policy(struct sock *sk, int optname, u8 *optval, int optlen);
struct xfrm_policy *xfrm_policy_alloc(int gfp);
extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *);
struct xfrm_policy *xfrm_policy_lookup(int dir, struct flowi *fl);
struct xfrm_policy *xfrm_policy_lookup(int dir, struct flowi *fl, unsigned short family);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_delete(int dir, struct xfrm_selector *sel);
struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete);
void xfrm_policy_flush(void);
void xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct xfrm_state * xfrm_find_acq(u8 mode, u16 reqid, u8 proto, u32 daddr, u32 saddr, int create);
struct xfrm_state * xfrm6_find_acq(u8 mode, u16 reqid, u8 proto, struct in6_addr *daddr,
struct in6_addr *saddr, int create);
extern void xfrm_policy_flush(void);
extern void xfrm_policy_kill(struct xfrm_policy *);
extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
......@@ -425,23 +451,129 @@ extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name);
extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name);
static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
{
__u32 *a1 = token1;
__u32 *a2 = token2;
int pdw;
int pbi;
pdw = prefixlen >> 5; /* num of whole __u32 in prefix */
pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
if (pdw)
if (memcmp(a1, a2, pdw << 2))
return 0;
if (pbi) {
__u32 mask;
mask = htonl((0xffffffff) << (32 - pbi));
if ((a1[pdw] ^ a2[pdw]) & mask)
return 0;
}
return 1;
}
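addr_match() compares whole 32-bit words of the prefix first and then masks the final partial word, so the same helper serves both IPv4 and IPv6 selectors. A small userspace sketch of the same logic follows; the addresses and prefix lengths are made-up illustration values, not taken from this commit:

```c
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* same logic as addr_match() above, using userspace types */
static int addr_match(const void *token1, const void *token2, int prefixlen)
{
	const unsigned int *a1 = token1;
	const unsigned int *a2 = token2;
	int pdw = prefixlen >> 5;    /* whole 32-bit words in the prefix */
	int pbi = prefixlen & 0x1f;  /* bits in the final, partial word  */

	if (pdw && memcmp(a1, a2, pdw << 2))
		return 0;
	if (pbi) {
		unsigned int mask = htonl(0xffffffffU << (32 - pbi));
		if ((a1[pdw] ^ a2[pdw]) & mask)
			return 0;
	}
	return 1;
}

int main(void)
{
	struct in6_addr a, b;

	/* two hosts in the same (assumed) 2001:db8:1:2::/64 prefix */
	inet_pton(AF_INET6, "2001:db8:1:2::1", &a);
	inet_pton(AF_INET6, "2001:db8:1:2::dead", &b);
	printf("/64 match: %d\n", addr_match(&a, &b, 64));   /* 1 */
	printf("/128 match: %d\n", addr_match(&a, &b, 128)); /* 0 */
	return 0;
}
```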
static inline int
xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
return !memcmp(fl->fl6_dst, sel->daddr.a6, sizeof(struct in6_addr)) &&
!((fl->uli_u.ports.dport^sel->dport)&sel->dport_mask) &&
!((fl->uli_u.ports.sport^sel->sport)&sel->sport_mask) &&
(fl->proto == sel->proto || !sel->proto) &&
(fl->oif == sel->ifindex || !sel->ifindex) &&
!memcmp(fl->fl6_src, sel->saddr.a6, sizeof(struct in6_addr));
return addr_match(fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
addr_match(fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
!((fl->uli_u.ports.dport^sel->dport)&sel->dport_mask) &&
!((fl->uli_u.ports.sport^sel->sport)&sel->sport_mask) &&
(fl->proto == sel->proto || !sel->proto) &&
(fl->oif == sel->ifindex || !sel->ifindex);
}
extern int xfrm6_register_type(struct xfrm_type *type);
extern int xfrm6_unregister_type(struct xfrm_type *type);
extern struct xfrm_type *xfrm6_get_type(u8 proto);
extern struct xfrm_state *xfrm6_state_lookup(struct in6_addr *daddr, u32 spi, u8 proto);
struct xfrm_state * xfrm6_find_acq(u8 mode, u16 reqid, u8 proto, struct in6_addr *daddr, struct in6_addr *saddr, int create);
void xfrm6_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct ah_data
{
u8 *key;
int key_len;
u8 *work_icv;
int icv_full_len;
int icv_trunc_len;
void (*icv)(struct ah_data*,
struct sk_buff *skb, u8 *icv);
struct crypto_tfm *tfm;
};
struct esp_data
{
/* Confidentiality */
struct {
u8 *key; /* Key */
int key_len; /* Key length */
u8 *ivec; /* ivec buffer */
/* ivlen is offset from enc_data, where encrypted data start.
* It is logically different of crypto_tfm_alg_ivsize(tfm).
* We assume that it is either zero (no ivec), or
* >= crypto_tfm_alg_ivsize(tfm). */
int ivlen;
int padlen; /* 0..255 */
struct crypto_tfm *tfm; /* crypto handle */
} conf;
/* Integrity. It is active when icv_full_len != 0 */
struct {
u8 *key; /* Key */
int key_len; /* Length of the key */
u8 *work_icv;
int icv_full_len;
int icv_trunc_len;
void (*icv)(struct esp_data*,
struct sk_buff *skb,
int offset, int len, u8 *icv);
struct crypto_tfm *tfm;
} auth;
};
typedef void (icv_update_fn_t)(struct crypto_tfm *, struct scatterlist *, unsigned int);
extern void skb_ah_walk(const struct sk_buff *skb,
struct crypto_tfm *tfm, icv_update_fn_t icv_update);
extern void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
int offset, int len, icv_update_fn_t icv_update);
extern int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len);
extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
static inline void
ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data)
{
struct crypto_tfm *tfm = ahp->tfm;
memset(auth_data, 0, ahp->icv_trunc_len);
crypto_hmac_init(tfm, ahp->key, &ahp->key_len);
skb_ah_walk(skb, tfm, crypto_hmac_update);
crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_icv);
memcpy(auth_data, ahp->work_icv, ahp->icv_trunc_len);
}
static inline void
esp_hmac_digest(struct esp_data *esp, struct sk_buff *skb, int offset,
int len, u8 *auth_data)
{
struct crypto_tfm *tfm = esp->auth.tfm;
char *icv = esp->auth.work_icv;
memset(auth_data, 0, esp->auth.icv_trunc_len);
crypto_hmac_init(tfm, esp->auth.key, &esp->auth.key_len);
skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update);
crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, icv);
memcpy(auth_data, icv, esp->auth.icv_trunc_len);
}
typedef int (xfrm_dst_lookup_t)(struct xfrm_dst **dst, struct flowi *fl);
int xfrm_dst_lookup_register(xfrm_dst_lookup_t *dst_lookup, unsigned short family);
void xfrm_dst_lookup_unregister(unsigned short family);
#endif /* _NET_XFRM_H */
......@@ -7,25 +7,8 @@
#include <net/icmp.h>
#include <asm/scatterlist.h>
#define AH_HLEN_NOICV 12
typedef void (icv_update_fn_t)(struct crypto_tfm *,
struct scatterlist *, unsigned int);
struct ah_data
{
u8 *key;
int key_len;
u8 *work_icv;
int icv_full_len;
int icv_trunc_len;
void (*icv)(struct ah_data*,
struct sk_buff *skb, u8 *icv);
struct crypto_tfm *tfm;
};
#define AH_HLEN_NOICV 12
/* Clear mutable options and find final destination to substitute
* into IP header for icv calculation. Options are already checked
......@@ -71,92 +54,6 @@ static int ip_clear_mutable_options(struct iphdr *iph, u32 *daddr)
return 0;
}
static void skb_ah_walk(const struct sk_buff *skb,
struct crypto_tfm *tfm, icv_update_fn_t icv_update)
{
int offset = 0;
int len = skb->len;
int start = skb->len - skb->data_len;
int i, copy = start - offset;
struct scatterlist sg;
/* Checksum header. */
if (copy > 0) {
if (copy > len)
copy = len;
sg.page = virt_to_page(skb->data + offset);
sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
sg.length = copy;
icv_update(tfm, &sg, 1);
if ((len -= copy) == 0)
return;
offset += copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
BUG_TRAP(start <= offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
sg.page = frag->page;
sg.offset = frag->page_offset + offset-start;
sg.length = copy;
icv_update(tfm, &sg, 1);
if (!(len -= copy))
return;
offset += copy;
}
start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
int end;
BUG_TRAP(start <= offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
skb_ah_walk(list, tfm, icv_update);
if ((len -= copy) == 0)
return;
offset += copy;
}
start = end;
}
}
if (len)
BUG();
}
static void
ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data)
{
struct crypto_tfm *tfm = ahp->tfm;
memset(auth_data, 0, ahp->icv_trunc_len);
crypto_hmac_init(tfm, ahp->key, &ahp->key_len);
skb_ah_walk(skb, tfm, crypto_hmac_update);
crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_icv);
memcpy(auth_data, ahp->work_icv, ahp->icv_trunc_len);
}
static int ah_output(struct sk_buff *skb)
{
int err;
......@@ -330,7 +227,7 @@ void ah4_err(struct sk_buff *skb, u32 info)
skb->h.icmph->code != ICMP_FRAG_NEEDED)
return;
x = xfrm_state_lookup(iph->daddr, ah->spi, IPPROTO_AH);
x = xfrm4_state_lookup(iph->daddr, ah->spi, IPPROTO_AH);
if (!x)
return;
printk(KERN_DEBUG "pmtu discvovery on SA AH/%08x/%08x\n",
......
......@@ -8,312 +8,8 @@
#include <linux/random.h>
#include <net/icmp.h>
#define MAX_SG_ONSTACK 4
typedef void (icv_update_fn_t)(struct crypto_tfm *,
struct scatterlist *, unsigned int);
/* BUGS:
* - we assume replay seqno is always present.
*/
struct esp_data
{
/* Confidentiality */
struct {
u8 *key; /* Key */
int key_len; /* Key length */
u8 *ivec; /* ivec buffer */
/* ivlen is offset from enc_data, where encrypted data start.
* It is logically different of crypto_tfm_alg_ivsize(tfm).
* We assume that it is either zero (no ivec), or
* >= crypto_tfm_alg_ivsize(tfm). */
int ivlen;
int padlen; /* 0..255 */
struct crypto_tfm *tfm; /* crypto handle */
} conf;
/* Integrity. It is active when icv_full_len != 0 */
struct {
u8 *key; /* Key */
int key_len; /* Length of the key */
u8 *work_icv;
int icv_full_len;
int icv_trunc_len;
void (*icv)(struct esp_data*,
struct sk_buff *skb,
int offset, int len, u8 *icv);
struct crypto_tfm *tfm;
} auth;
};
/* Move to common area: it is shared with AH. */
void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
int offset, int len, icv_update_fn_t icv_update)
{
int start = skb->len - skb->data_len;
int i, copy = start - offset;
struct scatterlist sg;
/* Checksum header. */
if (copy > 0) {
if (copy > len)
copy = len;
sg.page = virt_to_page(skb->data + offset);
sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
sg.length = copy;
icv_update(tfm, &sg, 1);
if ((len -= copy) == 0)
return;
offset += copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
BUG_TRAP(start <= offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
sg.page = frag->page;
sg.offset = frag->page_offset + offset-start;
sg.length = copy;
icv_update(tfm, &sg, 1);
if (!(len -= copy))
return;
offset += copy;
}
start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
int end;
BUG_TRAP(start <= offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
skb_icv_walk(list, tfm, offset-start, copy, icv_update);
if ((len -= copy) == 0)
return;
offset += copy;
}
start = end;
}
}
if (len)
BUG();
}
/* Looking generic it is not used in another places. */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
int start = skb->len - skb->data_len;
int i, copy = start - offset;
int elt = 0;
if (copy > 0) {
if (copy > len)
copy = len;
sg[elt].page = virt_to_page(skb->data + offset);
sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
sg[elt].length = copy;
elt++;
if ((len -= copy) == 0)
return elt;
offset += copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
BUG_TRAP(start <= offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
sg[elt].page = frag->page;
sg[elt].offset = frag->page_offset+offset-start;
sg[elt].length = copy;
elt++;
if (!(len -= copy))
return elt;
offset += copy;
}
start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
int end;
BUG_TRAP(start <= offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
if ((len -= copy) == 0)
return elt;
offset += copy;
}
start = end;
}
}
if (len)
BUG();
return elt;
}
/* Common with AH after some work on arguments. */
static void
esp_hmac_digest(struct esp_data *esp, struct sk_buff *skb, int offset,
int len, u8 *auth_data)
{
struct crypto_tfm *tfm = esp->auth.tfm;
char *icv = esp->auth.work_icv;
memset(auth_data, 0, esp->auth.icv_trunc_len);
crypto_hmac_init(tfm, esp->auth.key, &esp->auth.key_len);
skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update);
crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, icv);
memcpy(auth_data, icv, esp->auth.icv_trunc_len);
}
/* Check that skb data bits are writable. If they are not, copy data
* to newly created private area. If "tailbits" is given, make sure that
* tailbits bytes beyond current end of skb are writable.
*
* Returns amount of elements of scatterlist to load for subsequent
* transformations and pointer to writable trailer skb.
*/
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
int copyflag;
int elt;
struct sk_buff *skb1, **skb_p;
/* If skb is cloned or its head is paged, reallocate
* head pulling out all the pages (pages are considered not writable
* at the moment even if they are anonymous).
*/
if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
__pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
return -ENOMEM;
/* Easy case. Most of packets will go this way. */
if (!skb_shinfo(skb)->frag_list) {
/* A little of trouble, not enough of space for trailer.
* This should not happen, when stack is tuned to generate
* good frames. OK, on miss we reallocate and reserve even more
* space, 128 bytes is fair. */
if (skb_tailroom(skb) < tailbits &&
pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
return -ENOMEM;
/* Voila! */
*trailer = skb;
return 1;
}
/* Misery. We are in troubles, going to mincer fragments... */
elt = 1;
skb_p = &skb_shinfo(skb)->frag_list;
copyflag = 0;
while ((skb1 = *skb_p) != NULL) {
int ntail = 0;
/* The fragment is partially pulled by someone,
* this can happen on input. Copy it and everything
* after it. */
if (skb_shared(skb1))
copyflag = 1;
/* If the skb is the last, worry about trailer. */
if (skb1->next == NULL && tailbits) {
if (skb_shinfo(skb1)->nr_frags ||
skb_shinfo(skb1)->frag_list ||
skb_tailroom(skb1) < tailbits)
ntail = tailbits + 128;
}
if (copyflag ||
skb_cloned(skb1) ||
ntail ||
skb_shinfo(skb1)->nr_frags ||
skb_shinfo(skb1)->frag_list) {
struct sk_buff *skb2;
/* Fuck, we are miserable poor guys... */
if (ntail == 0)
skb2 = skb_copy(skb1, GFP_ATOMIC);
else
skb2 = skb_copy_expand(skb1,
skb_headroom(skb1),
ntail,
GFP_ATOMIC);
if (unlikely(skb2 == NULL))
return -ENOMEM;
if (skb1->sk)
skb_set_owner_w(skb, skb1->sk);
/* Looking around. Are we still alive?
* OK, link new skb, drop old one */
skb2->next = skb1->next;
*skb_p = skb2;
kfree_skb(skb1);
skb1 = skb2;
}
elt++;
*trailer = skb1;
skb_p = &skb1->next;
}
return elt;
}
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
if (tail != skb) {
skb->data_len += len;
skb->len += len;
}
return skb_put(tail, len);
}
#define MAX_SG_ONSTACK 4
int esp_output(struct sk_buff *skb)
{
......@@ -575,7 +271,7 @@ void esp4_err(struct sk_buff *skb, u32 info)
skb->h.icmph->code != ICMP_FRAG_NEEDED)
return;
x = xfrm_state_lookup(iph->daddr, esph->spi, IPPROTO_ESP);
x = xfrm4_state_lookup(iph->daddr, esph->spi, IPPROTO_ESP);
if (!x)
return;
printk(KERN_DEBUG "pmtu discvovery on SA ESP/%08x/%08x\n",
......
......@@ -96,6 +96,7 @@
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
......@@ -2599,6 +2600,13 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
#endif /* CONFIG_PROC_FS */
#endif /* CONFIG_NET_CLS_ROUTE */
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl)
{
int err = 0;
err = __ip_route_output_key((struct rtable**)dst, fl);
return err;
}
int __init ip_rt_init(void)
{
int i, order, goal, rc = 0;
......@@ -2680,6 +2688,7 @@ int __init ip_rt_init(void)
ip_rt_gc_interval;
add_timer(&rt_periodic_timer);
xfrm_dst_lookup_register(xfrm_dst_lookup, AF_INET);
#ifdef CONFIG_PROC_FS
if (rt_cache_proc_init())
goto out_enomem;
......
......@@ -8,9 +8,11 @@
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <net/xfrm.h>
#include <asm/scatterlist.h>
/*
* Algorithms supported by IPsec. These entries contain properties which
......@@ -348,3 +350,333 @@ int xfrm_count_enc_supported(void)
n++;
return n;
}
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
void skb_ah_walk(const struct sk_buff *skb,
struct crypto_tfm *tfm, icv_update_fn_t icv_update)
{
int offset = 0;
int len = skb->len;
int start = skb->len - skb->data_len;
int i, copy = start - offset;
struct scatterlist sg;
/* Checksum header. */
if (copy > 0) {
if (copy > len)
copy = len;
sg.page = virt_to_page(skb->data + offset);
sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
sg.length = copy;
icv_update(tfm, &sg, 1);
if ((len -= copy) == 0)
return;
offset += copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
BUG_TRAP(start <= offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
sg.page = frag->page;
sg.offset = frag->page_offset + offset-start;
sg.length = copy;
icv_update(tfm, &sg, 1);
if (!(len -= copy))
return;
offset += copy;
}
start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
int end;
BUG_TRAP(start <= offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
skb_ah_walk(list, tfm, icv_update);
if ((len -= copy) == 0)
return;
offset += copy;
}
start = end;
}
}
if (len)
BUG();
}
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
/* Move to common area: it is shared with AH. */
void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
int offset, int len, icv_update_fn_t icv_update)
{
int start = skb->len - skb->data_len;
int i, copy = start - offset;
struct scatterlist sg;
/* Checksum header. */
if (copy > 0) {
if (copy > len)
copy = len;
sg.page = virt_to_page(skb->data + offset);
sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
sg.length = copy;
icv_update(tfm, &sg, 1);
if ((len -= copy) == 0)
return;
offset += copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
BUG_TRAP(start <= offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
sg.page = frag->page;
sg.offset = frag->page_offset + offset-start;
sg.length = copy;
icv_update(tfm, &sg, 1);
if (!(len -= copy))
return;
offset += copy;
}
start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
int end;
BUG_TRAP(start <= offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
skb_icv_walk(list, tfm, offset-start, copy, icv_update);
if ((len -= copy) == 0)
return;
offset += copy;
}
start = end;
}
}
if (len)
BUG();
}
/* Looking generic it is not used in another places. */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
int start = skb->len - skb->data_len;
int i, copy = start - offset;
int elt = 0;
if (copy > 0) {
if (copy > len)
copy = len;
sg[elt].page = virt_to_page(skb->data + offset);
sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
sg[elt].length = copy;
elt++;
if ((len -= copy) == 0)
return elt;
offset += copy;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
BUG_TRAP(start <= offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
sg[elt].page = frag->page;
sg[elt].offset = frag->page_offset+offset-start;
sg[elt].length = copy;
elt++;
if (!(len -= copy))
return elt;
offset += copy;
}
start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
int end;
BUG_TRAP(start <= offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
if ((len -= copy) == 0)
return elt;
offset += copy;
}
start = end;
}
}
if (len)
BUG();
return elt;
}
/* Check that skb data bits are writable. If they are not, copy data
* to newly created private area. If "tailbits" is given, make sure that
* tailbits bytes beyond current end of skb are writable.
*
* Returns amount of elements of scatterlist to load for subsequent
* transformations and pointer to writable trailer skb.
*/
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
int copyflag;
int elt;
struct sk_buff *skb1, **skb_p;
/* If skb is cloned or its head is paged, reallocate
* head pulling out all the pages (pages are considered not writable
* at the moment even if they are anonymous).
*/
if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
__pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
return -ENOMEM;
/* Easy case. Most of packets will go this way. */
if (!skb_shinfo(skb)->frag_list) {
/* A little of trouble, not enough of space for trailer.
* This should not happen, when stack is tuned to generate
* good frames. OK, on miss we reallocate and reserve even more
* space, 128 bytes is fair. */
if (skb_tailroom(skb) < tailbits &&
pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
return -ENOMEM;
/* Voila! */
*trailer = skb;
return 1;
}
/* Misery. We are in troubles, going to mincer fragments... */
elt = 1;
skb_p = &skb_shinfo(skb)->frag_list;
copyflag = 0;
while ((skb1 = *skb_p) != NULL) {
int ntail = 0;
/* The fragment is partially pulled by someone,
* this can happen on input. Copy it and everything
* after it. */
if (skb_shared(skb1))
copyflag = 1;
/* If the skb is the last, worry about trailer. */
if (skb1->next == NULL && tailbits) {
if (skb_shinfo(skb1)->nr_frags ||
skb_shinfo(skb1)->frag_list ||
skb_tailroom(skb1) < tailbits)
ntail = tailbits + 128;
}
if (copyflag ||
skb_cloned(skb1) ||
ntail ||
skb_shinfo(skb1)->nr_frags ||
skb_shinfo(skb1)->frag_list) {
struct sk_buff *skb2;
/* Fuck, we are miserable poor guys... */
if (ntail == 0)
skb2 = skb_copy(skb1, GFP_ATOMIC);
else
skb2 = skb_copy_expand(skb1,
skb_headroom(skb1),
ntail,
GFP_ATOMIC);
if (unlikely(skb2 == NULL))
return -ENOMEM;
if (skb1->sk)
skb_set_owner_w(skb, skb1->sk);
/* Looking around. Are we still alive?
* OK, link new skb, drop old one */
skb2->next = skb1->next;
*skb_p = skb2;
kfree_skb(skb1);
skb1 = skb2;
}
elt++;
*trailer = skb1;
skb_p = &skb1->next;
}
return elt;
}
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
if (tail != skb) {
skb->data_len += len;
skb->len += len;
}
return skb_put(tail, len);
}
#endif
/* Changes
*
* Mitsuru KANDA @USAGI : IPv6 Support
* Kazunori MIYAZAWA @USAGI :
* YOSHIFUJI Hideaki @USAGI :
* Kunihiro Ishiguro :
*
*/
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/xfrm.h>
static kmem_cache_t *secpath_cachep;
......@@ -64,7 +74,7 @@ int xfrm4_rcv(struct sk_buff *skb)
if (xfrm_nr == XFRM_MAX_DEPTH)
goto drop;
x = xfrm_state_lookup(iph->daddr, spi, iph->protocol);
x = xfrm4_state_lookup(iph->daddr, spi, iph->protocol);
if (x == NULL)
goto drop;
......@@ -157,3 +167,288 @@ void __init xfrm_input_init(void)
if (!secpath_cachep)
panic("IP: failed to allocate secpath_cache\n");
}
#if defined (CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
/* Fetch spi and seq from ipsec header */
static int xfrm6_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq)
{
int offset, offset_seq;
switch (nexthdr) {
case IPPROTO_AH:
offset = offsetof(struct ip_auth_hdr, spi);
offset_seq = offsetof(struct ip_auth_hdr, seq_no);
break;
case IPPROTO_ESP:
offset = offsetof(struct ip_esp_hdr, spi);
offset_seq = offsetof(struct ip_esp_hdr, seq_no);
break;
case IPPROTO_COMP:
if (!pskb_may_pull(skb, 4))
return -EINVAL;
*spi = *(u16*)(skb->h.raw + 2);
*seq = 0;
return 0;
default:
return 1;
}
if (!pskb_may_pull(skb, 16))
return -EINVAL;
*spi = *(u32*)(skb->h.raw + offset);
*seq = *(u32*)(skb->h.raw + offset_seq);
return 0;
}
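The offsets pulled out here line up with the ipv6_auth_hdr and ipv6_esp_hdr layouts added earlier in this commit: the AH SPI sits 4 bytes in (after nexthdr, hdrlen and the reserved field) and the ESP SPI leads the header. A small standalone check, with the struct definitions copied from the diff and the kernel __u8/__u16/__u32 types swapped for stdint equivalents:

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct ipv6_auth_hdr {
	uint8_t  nexthdr;
	uint8_t  hdrlen;      /* measured in 32-bit units */
	uint16_t reserved;
	uint32_t spi;
	uint32_t seq_no;
	uint8_t  auth_data[4];
};

struct ipv6_esp_hdr {
	uint32_t spi;
	uint32_t seq_no;
	uint8_t  enc_data[8];
};

int main(void)
{
	/* AH: spi at byte 4, sequence number at byte 8 */
	printf("AH  spi=%zu seq=%zu\n",
	       offsetof(struct ipv6_auth_hdr, spi),
	       offsetof(struct ipv6_auth_hdr, seq_no));
	/* ESP: spi at byte 0, sequence number at byte 4 */
	printf("ESP spi=%zu seq=%zu\n",
	       offsetof(struct ipv6_esp_hdr, spi),
	       offsetof(struct ipv6_esp_hdr, seq_no));
	return 0;
}
```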
static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
{
u8 *opt = (u8 *)opthdr;
int len = ipv6_optlen(opthdr);
int off = 0;
int optlen = 0;
off += 2;
len -= 2;
while (len > 0) {
switch (opt[off]) {
case IPV6_TLV_PAD0:
optlen = 1;
break;
default:
if (len < 2)
goto bad;
optlen = opt[off+1]+2;
if (len < optlen)
goto bad;
if (opt[off] & 0x20)
memset(&opt[off+2], 0, opt[off+1]);
break;
}
off += optlen;
len -= optlen;
}
if (len == 0)
return 1;
bad:
return 0;
}
int xfrm6_clear_mutable_options(struct sk_buff *skb, u16 *nh_offset, int dir)
{
u16 offset = sizeof(struct ipv6hdr);
struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
unsigned int packet_len = skb->tail - skb->nh.raw;
u8 nexthdr = skb->nh.ipv6h->nexthdr;
u8 nextnexthdr = 0;
*nh_offset = ((unsigned char *)&skb->nh.ipv6h->nexthdr) - skb->nh.raw;
while (offset + 1 <= packet_len) {
switch (nexthdr) {
case NEXTHDR_HOP:
*nh_offset = offset;
offset += ipv6_optlen(exthdr);
if (!zero_out_mutable_opts(exthdr)) {
if (net_ratelimit())
printk(KERN_WARNING "overrun hopopts\n");
return 0;
}
nexthdr = exthdr->nexthdr;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
case NEXTHDR_ROUTING:
*nh_offset = offset;
offset += ipv6_optlen(exthdr);
((struct ipv6_rt_hdr*)exthdr)->segments_left = 0;
nexthdr = exthdr->nexthdr;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
case NEXTHDR_DEST:
*nh_offset = offset;
offset += ipv6_optlen(exthdr);
if (!zero_out_mutable_opts(exthdr)) {
if (net_ratelimit())
printk(KERN_WARNING "overrun destopt\n");
return 0;
}
nexthdr = exthdr->nexthdr;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
case NEXTHDR_AUTH:
if (dir == XFRM_POLICY_OUT) {
memset(((struct ipv6_auth_hdr*)exthdr)->auth_data, 0,
(((struct ipv6_auth_hdr*)exthdr)->hdrlen - 1) << 2);
}
if (exthdr->nexthdr == NEXTHDR_DEST) {
offset += (((struct ipv6_auth_hdr*)exthdr)->hdrlen + 2) << 2;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
nextnexthdr = exthdr->nexthdr;
if (!zero_out_mutable_opts(exthdr)) {
if (net_ratelimit())
printk(KERN_WARNING "overrun destopt\n");
return 0;
}
}
return nexthdr;
default :
return nexthdr;
}
}
return nexthdr;
}
int xfrm6_rcv(struct sk_buff *skb)
{
int err;
u32 spi, seq;
struct xfrm_state *xfrm_vec[XFRM_MAX_DEPTH];
struct xfrm_state *x;
int xfrm_nr = 0;
int decaps = 0;
struct ipv6hdr *hdr = skb->nh.ipv6h;
unsigned char *tmp_hdr = NULL;
int hdr_len = 0;
u16 nh_offset = 0;
u8 nexthdr = 0;
if (hdr->nexthdr == IPPROTO_AH || hdr->nexthdr == IPPROTO_ESP) {
nh_offset = ((unsigned char*)&skb->nh.ipv6h->nexthdr) - skb->nh.raw;
hdr_len = sizeof(struct ipv6hdr);
} else {
hdr_len = skb->h.raw - skb->nh.raw;
}
tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
if (!tmp_hdr)
goto drop;
memcpy(tmp_hdr, skb->nh.raw, hdr_len);
nexthdr = xfrm6_clear_mutable_options(skb, &nh_offset, XFRM_POLICY_IN);
hdr->priority = 0;
hdr->flow_lbl[0] = 0;
hdr->flow_lbl[1] = 0;
hdr->flow_lbl[2] = 0;
hdr->hop_limit = 0;
if ((err = xfrm6_parse_spi(skb, nexthdr, &spi, &seq)) != 0)
goto drop;
do {
struct ipv6hdr *iph = skb->nh.ipv6h;
if (xfrm_nr == XFRM_MAX_DEPTH)
goto drop;
x = xfrm6_state_lookup(&iph->daddr, spi, nexthdr);
if (x == NULL)
goto drop;
spin_lock(&x->lock);
if (unlikely(x->km.state != XFRM_STATE_VALID))
goto drop_unlock;
if (x->props.replay_window && xfrm_replay_check(x, seq))
goto drop_unlock;
nexthdr = x->type->input(x, skb);
if (nexthdr <= 0)
goto drop_unlock;
if (x->props.replay_window)
xfrm_replay_advance(x, seq);
x->curlft.bytes += skb->len;
x->curlft.packets++;
spin_unlock(&x->lock);
xfrm_vec[xfrm_nr++] = x;
iph = skb->nh.ipv6h; /* ??? */
if (nexthdr == NEXTHDR_DEST) {
if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
!pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
err = -EINVAL;
goto drop;
}
nexthdr = skb->h.raw[0];
nh_offset = skb->h.raw - skb->nh.raw;
skb_pull(skb, (skb->h.raw[1]+1)<<3);
skb->h.raw = skb->data;
}
if (x->props.mode) { /* XXX */
if (iph->nexthdr != IPPROTO_IPV6)
goto drop;
skb->nh.raw = skb->data;
iph = skb->nh.ipv6h;
decaps = 1;
break;
}
if ((err = xfrm6_parse_spi(skb, nexthdr, &spi, &seq)) < 0)
goto drop;
} while (!err);
memcpy(skb->nh.raw, tmp_hdr, hdr_len);
skb->nh.raw[nh_offset] = nexthdr;
skb->nh.ipv6h->payload_len = htons(hdr_len + skb->len - sizeof(struct ipv6hdr));
/* Allocate new secpath or COW existing one. */
if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
struct sec_path *sp;
sp = kmem_cache_alloc(secpath_cachep, SLAB_ATOMIC);
if (!sp)
goto drop;
if (skb->sp) {
memcpy(sp, skb->sp, sizeof(struct sec_path));
secpath_put(skb->sp);
} else
sp->len = 0;
atomic_set(&sp->refcnt, 1);
skb->sp = sp;
}
if (xfrm_nr + skb->sp->len > XFRM_MAX_DEPTH)
goto drop;
memcpy(skb->sp->xvec+skb->sp->len, xfrm_vec, xfrm_nr*sizeof(void*));
skb->sp->len += xfrm_nr;
if (decaps) {
if (!(skb->dev->flags&IFF_LOOPBACK)) {
dst_release(skb->dst);
skb->dst = NULL;
}
netif_rx(skb);
return 0;
} else {
return -nexthdr;
}
drop_unlock:
spin_unlock(&x->lock);
xfrm_state_put(x);
drop:
if (tmp_hdr) kfree(tmp_hdr);
while (--xfrm_nr >= 0)
xfrm_state_put(xfrm_vec[xfrm_nr]);
kfree_skb(skb);
return 0;
}
#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
......@@ -234,8 +234,8 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
switch (x->props.family) {
case AF_INET:
x1 = xfrm_state_lookup(x->props.saddr.xfrm4_addr,
x->id.spi, x->id.proto);
x1 = xfrm4_state_lookup(x->props.saddr.xfrm4_addr,
x->id.spi, x->id.proto);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
......@@ -265,7 +265,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
switch (p->family) {
case AF_INET:
x = xfrm_state_lookup(p->saddr.xfrm4_addr, p->spi, p->proto);
x = xfrm4_state_lookup(p->saddr.xfrm4_addr, p->spi, p->proto);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
......@@ -395,7 +395,7 @@ static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
switch (p->family) {
case AF_INET:
x = xfrm_state_lookup(p->saddr.xfrm4_addr, p->spi, p->proto);
x = xfrm4_state_lookup(p->saddr.xfrm4_addr, p->spi, p->proto);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
......
......@@ -17,5 +17,20 @@ config IPV6_PRIVACY
See <file:Documentation/networking/ip-sysctl.txt> for details.
source "net/ipv6/netfilter/Kconfig"
config INET6_AH
tristate "IPv6: AH transformation"
depends on IPV6
---help---
Support for IPsec AH.
If unsure, say Y.
config INET6_ESP
tristate "IPv6: ESP transformation"
depends on IPV6
---help---
Support for IPsec ESP.
If unsure, say Y.
source "net/ipv6/netfilter/Kconfig"
......@@ -10,4 +10,6 @@ ipv6-objs := af_inet6.o ip6_output.o ip6_input.o addrconf.o sit.o \
exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
ip6_flowlabel.o ipv6_syms.o
obj-$(CONFIG_INET6_AH) += ah6.o
obj-$(CONFIG_INET6_ESP) += esp6.o
obj-$(CONFIG_NETFILTER) += netfilter/
/*
* Copyright (C)2002 USAGI/WIDE Project
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Authors
*
* Mitsuru KANDA @USAGI : IPv6 Support
* Kazunori MIYAZAWA @USAGI :
* Kunihiro Ishiguro :
*
* This file is derived from net/ipv4/ah.c.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/xfrm.h>
#include <asm/scatterlist.h>
#define AH_HLEN_NOICV 12
/* XXX no ipv6 ah specific */
#define NIP6(addr) \
ntohs((addr).s6_addr16[0]),\
ntohs((addr).s6_addr16[1]),\
ntohs((addr).s6_addr16[2]),\
ntohs((addr).s6_addr16[3]),\
ntohs((addr).s6_addr16[4]),\
ntohs((addr).s6_addr16[5]),\
ntohs((addr).s6_addr16[6]),\
ntohs((addr).s6_addr16[7])
int ah6_output(struct sk_buff *skb)
{
int err;
int hdr_len = sizeof(struct ipv6hdr);
struct dst_entry *dst = skb->dst;
struct xfrm_state *x = dst->xfrm;
struct ipv6hdr *iph = NULL;
struct ip_auth_hdr *ah;
struct ah_data *ahp;
u16 nh_offset = 0;
u8 nexthdr;
printk(KERN_DEBUG "%s\n", __FUNCTION__);
if (skb->ip_summed == CHECKSUM_HW && skb_checksum_help(skb) == NULL)
return -EINVAL;
spin_lock_bh(&x->lock);
if ((err = xfrm_state_check_expire(x)) != 0)
goto error;
if ((err = xfrm_state_check_space(x, skb)) != 0)
goto error;
if (x->props.mode) {
iph = skb->nh.ipv6h;
skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, x->props.header_len);
skb->nh.ipv6h->version = 6;
skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
skb->nh.ipv6h->nexthdr = IPPROTO_AH;
memcpy(&skb->nh.ipv6h->saddr, &x->props.saddr, sizeof(struct in6_addr));
memcpy(&skb->nh.ipv6h->daddr, &x->id.daddr, sizeof(struct in6_addr));
ah = (struct ip_auth_hdr*)(skb->nh.ipv6h+1);
ah->nexthdr = IPPROTO_IPV6;
} else {
hdr_len = skb->h.raw - skb->nh.raw;
iph = kmalloc(hdr_len, GFP_ATOMIC);
if (!iph) {
err = -ENOMEM;
goto error;
}
memcpy(iph, skb->data, hdr_len);
skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, x->props.header_len);
memcpy(skb->nh.ipv6h, iph, hdr_len);
nexthdr = xfrm6_clear_mutable_options(skb, &nh_offset, XFRM_POLICY_OUT);
if (nexthdr == 0)
goto error;
skb->nh.raw[nh_offset] = IPPROTO_AH;
skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
ah = (struct ip_auth_hdr*)(skb->nh.raw+hdr_len);
skb->h.raw = (unsigned char*) ah;
ah->nexthdr = nexthdr;
}
skb->nh.ipv6h->priority = 0;
skb->nh.ipv6h->flow_lbl[0] = 0;
skb->nh.ipv6h->flow_lbl[1] = 0;
skb->nh.ipv6h->flow_lbl[2] = 0;
skb->nh.ipv6h->hop_limit = 0;
ahp = x->data;
ah->hdrlen = (XFRM_ALIGN8(ahp->icv_trunc_len +
AH_HLEN_NOICV) >> 2) - 2;
ah->reserved = 0;
ah->spi = x->id.spi;
ah->seq_no = htonl(++x->replay.oseq);
ahp->icv(ahp, skb, ah->auth_data);
if (x->props.mode) {
skb->nh.ipv6h->hop_limit = iph->hop_limit;
skb->nh.ipv6h->priority = iph->priority;
skb->nh.ipv6h->flow_lbl[0] = iph->flow_lbl[0];
skb->nh.ipv6h->flow_lbl[1] = iph->flow_lbl[1];
skb->nh.ipv6h->flow_lbl[2] = iph->flow_lbl[2];
} else {
memcpy(skb->nh.ipv6h, iph, hdr_len);
skb->nh.raw[nh_offset] = IPPROTO_AH;
skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
kfree (iph);
}
skb->nh.raw = skb->data;
x->curlft.bytes += skb->len;
x->curlft.packets++;
spin_unlock_bh(&x->lock);
if ((skb->dst = dst_pop(dst)) == NULL)
goto error_nolock;
return NET_XMIT_BYPASS;
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(skb);
return err;
}
int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ah_hlen;
struct ipv6hdr *iph;
struct ipv6_auth_hdr *ah;
struct ah_data *ahp;
unsigned char *tmp_hdr = NULL;
int hdr_len = skb->h.raw - skb->nh.raw;
u8 nexthdr = 0;
if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
goto out;
ah = (struct ipv6_auth_hdr*)skb->data;
ahp = x->data;
ah_hlen = (ah->hdrlen + 2) << 2;
if (ah_hlen != XFRM_ALIGN8(ahp->icv_full_len + AH_HLEN_NOICV) &&
ah_hlen != XFRM_ALIGN8(ahp->icv_trunc_len + AH_HLEN_NOICV))
goto out;
if (!pskb_may_pull(skb, ah_hlen))
goto out;
/* We are going to _remove_ AH header to keep sockets happy,
* so... Later this can change. */
if (skb_cloned(skb) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
goto out;
tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
if (!tmp_hdr)
goto out;
memcpy(tmp_hdr, skb->nh.raw, hdr_len);
ah = (struct ipv6_auth_hdr*)skb->data;
iph = skb->nh.ipv6h;
{
u8 auth_data[ahp->icv_trunc_len];
memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
skb_push(skb, skb->data - skb->nh.raw);
ahp->icv(ahp, skb, ah->auth_data);
if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) {
if (net_ratelimit())
printk(KERN_WARNING "ipsec ah authentication error\n");
x->stats.integrity_failed++;
goto free_out;
}
}
nexthdr = ah->nexthdr;
skb->nh.raw = skb_pull(skb, (ah->hdrlen+2)<<2);
memcpy(skb->nh.raw, tmp_hdr, hdr_len);
skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
skb_pull(skb, hdr_len);
skb->h.raw = skb->data;
kfree(tmp_hdr);
return nexthdr;
free_out:
kfree(tmp_hdr);
out:
return -EINVAL;
}
void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
int type, int code, int offset, __u32 info)
{
struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
struct xfrm_state *x;
if (type != ICMPV6_DEST_UNREACH &&
type != ICMPV6_PKT_TOOBIG)
return;
x = xfrm6_state_lookup(&iph->daddr, ah->spi, IPPROTO_AH);
if (!x)
return;
printk(KERN_DEBUG "pmtu discvovery on SA AH/%08x/"
"%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
ntohl(ah->spi), NIP6(iph->daddr));
xfrm_state_put(x);
}
static int ah6_init_state(struct xfrm_state *x, void *args)
{
struct ah_data *ahp = NULL;
struct xfrm_algo_desc *aalg_desc;
/* null auth can use a zero length key */
if (x->aalg->alg_key_len > 512)
goto error;
ahp = kmalloc(sizeof(*ahp), GFP_KERNEL);
if (ahp == NULL)
return -ENOMEM;
memset(ahp, 0, sizeof(*ahp));
ahp->key = x->aalg->alg_key;
ahp->key_len = (x->aalg->alg_key_len+7)/8;
ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
if (!ahp->tfm)
goto error;
ahp->icv = ah_hmac_digest;
/*
* Lookup the algorithm description maintained by xfrm_algo,
* verify crypto transform properties, and store information
* we need for AH processing. This lookup cannot fail here
* after a successful crypto_alloc_tfm().
*/
aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name);
BUG_ON(!aalg_desc);
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
crypto_tfm_alg_digestsize(ahp->tfm)) {
printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm),
aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
}
ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
if (!ahp->work_icv)
goto error;
x->props.header_len = XFRM_ALIGN8(ahp->icv_trunc_len + AH_HLEN_NOICV);
if (x->props.mode)
x->props.header_len += 20;
x->data = ahp;
return 0;
error:
if (ahp) {
if (ahp->work_icv)
kfree(ahp->work_icv);
if (ahp->tfm)
crypto_free_tfm(ahp->tfm);
kfree(ahp);
}
return -EINVAL;
}
static void ah6_destroy(struct xfrm_state *x)
{
struct ah_data *ahp = x->data;
if (ahp->work_icv) {
kfree(ahp->work_icv);
ahp->work_icv = NULL;
}
if (ahp->tfm) {
crypto_free_tfm(ahp->tfm);
ahp->tfm = NULL;
}
}
static struct xfrm_type ah6_type =
{
.description = "AH6",
.proto = IPPROTO_AH,
.init_state = ah6_init_state,
.destructor = ah6_destroy,
.input = ah6_input,
.output = ah6_output
};
static struct inet6_protocol ah6_protocol = {
.handler = xfrm6_rcv,
.err_handler = ah6_err,
};
int __init ah6_init(void)
{
SET_MODULE_OWNER(&ah6_type);
if (xfrm6_register_type(&ah6_type) < 0) {
printk(KERN_INFO "ipv6 ah init: can't add xfrm type\n");
return -EAGAIN;
}
if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) {
printk(KERN_INFO "ipv6 ah init: can't add protocol\n");
xfrm6_unregister_type(&ah6_type);
return -EAGAIN;
}
return 0;
}
static void __exit ah6_fini(void)
{
if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0)
printk(KERN_INFO "ipv6 ah close: can't remove protocol\n");
if (xfrm6_unregister_type(&ah6_type) < 0)
printk(KERN_INFO "ipv6 ah close: can't remove xfrm type\n");
}
module_init(ah6_init);
module_exit(ah6_fini);
MODULE_LICENSE("GPL");
......@@ -150,7 +150,8 @@ static inline int ip6_input_finish(struct sk_buff *skb)
It would be stupid to detect for optional headers,
which are missing with probability of 200%
*/
if (nexthdr != IPPROTO_TCP && nexthdr != IPPROTO_UDP) {
if (nexthdr != IPPROTO_TCP && nexthdr != IPPROTO_UDP &&
nexthdr != NEXTHDR_AUTH && nexthdr != NEXTHDR_ESP) {
nhoff = ipv6_parse_exthdrs(&skb, nhoff);
if (nhoff < 0)
return 0;
......
......@@ -192,6 +192,11 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
int seg_len = skb->len;
int hlimit;
u32 mtu;
int err = 0;
if ((err = xfrm_lookup(&skb->dst, fl, sk, 0)) < 0) {
return err;
}
if (opt) {
int head_room;
......@@ -576,6 +581,13 @@ int ip6_build_xmit(struct sock *sk, inet_getfrag_t getfrag, const void *data,
}
pktlength = length;
if (dst) {
if ((err = xfrm_lookup(&dst, fl, sk, 0)) < 0) {
dst_release(dst);
return -ENETUNREACH;
}
}
if (hlimit < 0) {
if (ipv6_addr_is_multicast(fl->fl6_dst))
hlimit = np->mcast_hops;
......@@ -630,10 +642,8 @@ int ip6_build_xmit(struct sock *sk, inet_getfrag_t getfrag, const void *data,
err = 0;
if (flags&MSG_PROBE)
goto out;
skb = sock_alloc_send_skb(sk, pktlength + 15 +
dev->hard_header_len,
flags & MSG_DONTWAIT, &err);
/* alloc skb with mtu as we do in the IPv4 stack for IPsec */
skb = sock_alloc_send_skb(sk, mtu, flags & MSG_DONTWAIT, &err);
if (skb == NULL) {
IP6_INC_STATS(Ip6OutDiscards);
......@@ -663,6 +673,8 @@ int ip6_build_xmit(struct sock *sk, inet_getfrag_t getfrag, const void *data,
err = getfrag(data, &hdr->saddr,
((char *) hdr) + (pktlength - length),
0, length);
if (!opt || !opt->dst1opt)
skb->h.raw = ((char *) hdr) + (pktlength - length);
if (!err) {
IP6_INC_STATS(Ip6OutRequests);
......
......@@ -71,6 +71,7 @@
#include <net/addrconf.h>
#include <net/icmp.h>
#include <net/flow.h>
#include <net/checksum.h>
#include <linux/proc_fs.h>
......@@ -335,8 +336,6 @@ ndisc_build_ll_hdr(struct sk_buff *skb, struct net_device *dev,
unsigned char ha[MAX_ADDR_LEN];
unsigned char *h_dest = NULL;
skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
if (dev->hard_header) {
if (ipv6_addr_type(daddr) & IPV6_ADDR_MULTICAST) {
ndisc_mc_map(daddr, ha, dev, 1);
......@@ -373,10 +372,50 @@ ndisc_build_ll_hdr(struct sk_buff *skb, struct net_device *dev,
* Send a Neighbour Advertisement
*/
int ndisc_output(struct sk_buff *skb)
{
if (skb) {
struct neighbour *neigh = (skb->dst ? skb->dst->neighbour : NULL);
if (ndisc_build_ll_hdr(skb, skb->dev, &skb->nh.ipv6h->daddr, neigh, skb->len) == 0) {
kfree_skb(skb);
return -EINVAL;
}
dev_queue_xmit(skb);
return 0;
}
return -EINVAL;
}
static inline void ndisc_rt_init(struct rt6_info *rt, struct net_device *dev,
struct neighbour *neigh)
{
rt->rt6i_dev = dev;
rt->rt6i_nexthop = neigh;
rt->rt6i_expires = 0;
rt->rt6i_flags = RTF_LOCAL;
rt->rt6i_metric = 0;
rt->rt6i_hoplimit = 255;
rt->u.dst.output = ndisc_output;
}
static inline void ndisc_flow_init(struct flowi *fl, u8 type,
struct in6_addr *saddr, struct in6_addr *daddr)
{
memset(fl, 0, sizeof(*fl));
fl->fl6_src = saddr;
fl->fl6_dst = daddr;
fl->proto = IPPROTO_ICMPV6;
fl->uli_u.icmpt.type = type;
fl->uli_u.icmpt.code = 0;
}
static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
struct in6_addr *daddr, struct in6_addr *solicited_addr,
int router, int solicited, int override, int inc_opt)
int router, int solicited, int override, int inc_opt)
{
struct flowi fl;
struct rt6_info *rt = NULL;
struct dst_entry* dst;
struct sock *sk = ndisc_socket->sk;
struct nd_msg *msg;
int len;
......@@ -385,6 +424,22 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
rt = ndisc_get_dummy_rt();
if (!rt)
return;
ndisc_flow_init(&fl, NDISC_NEIGHBOUR_ADVERTISEMENT, solicited_addr, daddr);
ndisc_rt_init(rt, dev, neigh);
dst = (struct dst_entry*)rt;
dst_clone(dst);
err = xfrm_lookup(&dst, &fl, NULL, 0);
if (err < 0) {
dst_release(dst);
return;
}
if (inc_opt) {
if (dev->addr_len)
len += NDISC_OPT_SPACE(dev->addr_len);
......@@ -400,14 +455,10 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
return;
}
if (ndisc_build_ll_hdr(skb, dev, daddr, neigh, len) == 0) {
kfree_skb(skb);
return;
}
skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
ip6_nd_hdr(sk, skb, dev, solicited_addr, daddr, IPPROTO_ICMPV6, len);
msg = (struct nd_msg *) skb_put(skb, len);
skb->h.raw = (unsigned char*) msg = (struct nd_msg *) skb_put(skb, len);
msg->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
msg->icmph.icmp6_code = 0;
......@@ -430,7 +481,9 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
csum_partial((__u8 *) msg,
len, 0));
dev_queue_xmit(skb);
dst_clone(dst);
skb->dst = dst;
dst_output(skb);
ICMP6_INC_STATS(Icmp6OutNeighborAdvertisements);
ICMP6_INC_STATS(Icmp6OutMsgs);
......@@ -440,6 +493,9 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
struct in6_addr *solicit,
struct in6_addr *daddr, struct in6_addr *saddr)
{
struct flowi fl;
struct rt6_info *rt = NULL;
struct dst_entry* dst;
struct sock *sk = ndisc_socket->sk;
struct sk_buff *skb;
struct nd_msg *msg;
......@@ -454,6 +510,22 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
saddr = &addr_buf;
}
rt = ndisc_get_dummy_rt();
if (!rt)
return;
ndisc_flow_init(&fl, NDISC_NEIGHBOUR_SOLICITATION, saddr, daddr);
ndisc_rt_init(rt, dev, neigh);
dst = (struct dst_entry*)rt;
dst_clone(dst);
err = xfrm_lookup(&dst, &fl, NULL, 0);
if (err < 0) {
dst_release(dst);
return;
}
len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
send_llinfo = dev->addr_len && ipv6_addr_type(saddr) != IPV6_ADDR_ANY;
if (send_llinfo)
......@@ -466,14 +538,10 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
return;
}
if (ndisc_build_ll_hdr(skb, dev, daddr, neigh, len) == 0) {
kfree_skb(skb);
return;
}
skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);
msg = (struct nd_msg *)skb_put(skb, len);
skb->h.raw = (unsigned char*) msg = (struct nd_msg *)skb_put(skb, len);
msg->icmph.icmp6_type = NDISC_NEIGHBOUR_SOLICITATION;
msg->icmph.icmp6_code = 0;
msg->icmph.icmp6_cksum = 0;
......@@ -492,7 +560,9 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
csum_partial((__u8 *) msg,
len, 0));
/* send it! */
dev_queue_xmit(skb);
dst_clone(dst);
skb->dst = dst;
dst_output(skb);
ICMP6_INC_STATS(Icmp6OutNeighborSolicits);
ICMP6_INC_STATS(Icmp6OutMsgs);
......@@ -501,6 +571,9 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr)
{
struct flowi fl;
struct rt6_info *rt = NULL;
struct dst_entry* dst;
struct sock *sk = ndisc_socket->sk;
struct sk_buff *skb;
struct icmp6hdr *hdr;
......@@ -508,6 +581,22 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr,
int len;
int err;
rt = ndisc_get_dummy_rt();
if (!rt)
return;
ndisc_flow_init(&fl, NDISC_ROUTER_SOLICITATION, saddr, daddr);
ndisc_rt_init(rt, dev, NULL);
dst = (struct dst_entry*)rt;
dst_clone(dst);
err = xfrm_lookup(&dst, &fl, NULL, 0);
if (err < 0) {
dst_release(dst);
return;
}
len = sizeof(struct icmp6hdr);
if (dev->addr_len)
len += NDISC_OPT_SPACE(dev->addr_len);
......@@ -519,14 +608,10 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr,
return;
}
if (ndisc_build_ll_hdr(skb, dev, daddr, NULL, len) == 0) {
kfree_skb(skb);
return;
}
skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);
hdr = (struct icmp6hdr *) skb_put(skb, len);
skb->h.raw = (unsigned char*) hdr = (struct icmp6hdr *) skb_put(skb, len);
hdr->icmp6_type = NDISC_ROUTER_SOLICITATION;
hdr->icmp6_code = 0;
hdr->icmp6_cksum = 0;
......@@ -543,7 +628,9 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr,
csum_partial((__u8 *) hdr, len, 0));
/* send it! */
dev_queue_xmit(skb);
dst_clone(dst);
skb->dst = dst;
dst_output(skb);
ICMP6_INC_STATS(Icmp6OutRouterSolicits);
ICMP6_INC_STATS(Icmp6OutMsgs);
......@@ -1125,6 +1212,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
struct in6_addr *addrp;
struct net_device *dev;
struct rt6_info *rt;
struct dst_entry *dst;
struct flowi fl;
u8 *opt;
int rd_len;
int err;
......@@ -1136,6 +1225,22 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
if (rt == NULL)
return;
dst = (struct dst_entry*)rt;
if (ipv6_get_lladdr(dev, &saddr_buf)) {
ND_PRINTK1("redirect: no link_local addr for dev\n");
return;
}
ndisc_flow_init(&fl, NDISC_REDIRECT, &saddr_buf, &skb->nh.ipv6h->saddr);
dst_clone(dst);
err = xfrm_lookup(&dst, &fl, NULL, 0);
if (err) {
dst_release(dst);
return;
}
if (rt->rt6i_flags & RTF_GATEWAY) {
ND_PRINTK1("ndisc_send_redirect: not a neighbour\n");
dst_release(&rt->u.dst);
......@@ -1164,11 +1269,6 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
rd_len &= ~0x7;
len += rd_len;
if (ipv6_get_lladdr(dev, &saddr_buf)) {
ND_PRINTK1("redirect: no link_local addr for dev\n");
return;
}
buff = sock_alloc_send_skb(sk, MAX_HEADER + len + dev->hard_header_len + 15,
0, &err);
if (buff == NULL) {
......@@ -1178,15 +1278,11 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
hlen = 0;
if (ndisc_build_ll_hdr(buff, dev, &skb->nh.ipv6h->saddr, NULL, len) == 0) {
kfree_skb(buff);
return;
}
skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
ip6_nd_hdr(sk, buff, dev, &saddr_buf, &skb->nh.ipv6h->saddr,
IPPROTO_ICMPV6, len);
icmph = (struct icmp6hdr *) skb_put(buff, len);
skb->h.raw = (unsigned char*) icmph = (struct icmp6hdr *) skb_put(buff, len);
memset(icmph, 0, sizeof(struct icmp6hdr));
icmph->icmp6_type = NDISC_REDIRECT;
......@@ -1224,7 +1320,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
len, IPPROTO_ICMPV6,
csum_partial((u8 *) icmph, len, 0));
dev_queue_xmit(buff);
skb->dst = dst;
dst_output(skb);
ICMP6_INC_STATS(Icmp6OutRedirects);
ICMP6_INC_STATS(Icmp6OutMsgs);
......
......@@ -45,6 +45,7 @@
#include <net/inet_common.h>
#include <net/rawv6.h>
#include <net/xfrm.h>
struct sock *raw_v6_htable[RAWV6_HTABLE_SIZE];
rwlock_t raw_v6_lock = RW_LOCK_UNLOCKED;
......@@ -304,6 +305,11 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
struct inet_opt *inet = inet_sk(sk);
struct raw6_opt *raw_opt = raw6_sk(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
kfree_skb(skb);
return NET_RX_DROP;
}
if (!raw_opt->checksum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
......
......@@ -49,6 +49,8 @@
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <asm/uaccess.h>
......@@ -128,6 +130,12 @@ struct fib6_node ip6_routing_table = {
rwlock_t rt6_lock = RW_LOCK_UNLOCKED;
/* Dummy rt for ndisc */
struct rt6_info *ndisc_get_dummy_rt()
{
return dst_alloc(&ip6_dst_ops);
}
/*
* Route lookup. Any rt6_lock is implied.
*/
......@@ -1859,6 +1867,14 @@ ctl_table ipv6_route_table[] = {
#endif
int xfrm6_dst_lookup(struct xfrm_dst **dst, struct flowi *fl)
{
int err = 0;
*dst = (struct xfrm_dst*)ip6_route_output(NULL, fl);
if (!*dst)
err = -ENETUNREACH;
return err;
}
void __init ip6_route_init(void)
{
......@@ -1867,6 +1883,7 @@ void __init ip6_route_init(void)
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
fib6_init();
xfrm_dst_lookup_register(xfrm6_dst_lookup, AF_INET6);
#ifdef CONFIG_PROC_FS
proc_net_create("ipv6_route", 0, rt6_proc_info);
proc_net_create("rt6_stats", 0, rt6_proc_stats);
......@@ -1880,7 +1897,7 @@ void ip6_route_cleanup(void)
proc_net_remove("ipv6_route");
proc_net_remove("rt6_stats");
#endif
xfrm_dst_lookup_unregister(AF_INET6);
rt6_ifdown(NULL);
fib6_gc_cleanup();
}
......
......@@ -50,6 +50,7 @@
#include <net/ip6_route.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <asm/uaccess.h>
......@@ -677,6 +678,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl.nl_u.ip6_u.daddr = rt0->addr;
}
if (!fl.fl6_src)
fl.fl6_src = &np->saddr;
dst = ip6_route_output(sk, &fl);
if ((err = dst->error) != 0) {
......@@ -1637,6 +1641,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
if (sk_filter(sk, skb, 0))
goto discard_and_relse;
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_it;
skb->dev = NULL;
bh_lock_sock(sk);
......@@ -1652,6 +1659,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
return ret;
no_tcp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_and_relse;
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
TCP_INC_STATS_BH(TcpInErrs);
......@@ -1671,8 +1681,11 @@ static int tcp_v6_rcv(struct sk_buff *skb)
discard_and_relse:
sock_put(sk);
goto discard_it;
do_time_wait:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_and_relse;
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
TCP_INC_STATS_BH(TcpInErrs);
sock_put(sk);
......
......@@ -50,6 +50,7 @@
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/xfrm.h>
DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
......@@ -541,6 +542,11 @@ static void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
kfree_skb(skb);
return -1;
}
#if defined(CONFIG_FILTER)
if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
......@@ -646,6 +652,9 @@ static int udpv6_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto short_packet;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
saddr = &skb->nh.ipv6h->saddr;
daddr = &skb->nh.ipv6h->daddr;
uh = skb->h.uh;
......
......@@ -550,8 +550,8 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct sadb_msg *hdr, void **
switch (((struct sockaddr *)(addr + 1))->sa_family) {
case AF_INET:
x = xfrm_state_lookup(((struct sockaddr_in *)(addr + 1))->sin_addr.s_addr,
sa->sadb_sa_spi, proto);
x = xfrm4_state_lookup(((struct sockaddr_in *)(addr + 1))->sin_addr.s_addr,
sa->sadb_sa_spi, proto);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
......@@ -1097,18 +1097,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
min_spi = htonl(0x100);
max_spi = htonl(0x0fffffff);
}
switch (x->props.family) {
case AF_INET:
xfrm_alloc_spi(x, min_spi, max_spi);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
xfrm6_alloc_spi(x, min_spi, max_spi);
break;
#endif
default:
break;
}
xfrm_alloc_spi(x, min_spi, max_spi);
if (x->id.spi)
resp_skb = pfkey_xfrm_state2msg(x, 0, 3);
}
......
......@@ -296,11 +296,11 @@ EXPORT_SYMBOL(__xfrm_policy_check);
EXPORT_SYMBOL(__xfrm_route_forward);
EXPORT_SYMBOL(xfrm_state_alloc);
EXPORT_SYMBOL(__xfrm_state_destroy);
EXPORT_SYMBOL(xfrm_state_find);
EXPORT_SYMBOL(xfrm4_state_find);
EXPORT_SYMBOL(xfrm_state_insert);
EXPORT_SYMBOL(xfrm_state_check_expire);
EXPORT_SYMBOL(xfrm_state_check_space);
EXPORT_SYMBOL(xfrm_state_lookup);
EXPORT_SYMBOL(xfrm4_state_lookup);
EXPORT_SYMBOL(xfrm_replay_check);
EXPORT_SYMBOL(xfrm_replay_advance);
EXPORT_SYMBOL(xfrm_check_selectors);
......@@ -324,13 +324,17 @@ EXPORT_SYMBOL(xfrm_policy_walk);
EXPORT_SYMBOL(xfrm_policy_flush);
EXPORT_SYMBOL(xfrm_policy_byid);
EXPORT_SYMBOL(xfrm_policy_list);
EXPORT_SYMBOL(xfrm_dst_lookup_register);
EXPORT_SYMBOL(xfrm_dst_lookup_unregister);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
EXPORT_SYMBOL(xfrm6_state_find);
EXPORT_SYMBOL(xfrm6_rcv);
EXPORT_SYMBOL(xfrm6_state_lookup);
EXPORT_SYMBOL(xfrm6_find_acq);
EXPORT_SYMBOL(xfrm6_alloc_spi);
EXPORT_SYMBOL(xfrm6_register_type);
EXPORT_SYMBOL(xfrm6_unregister_type);
EXPORT_SYMBOL(xfrm6_get_type);
EXPORT_SYMBOL(xfrm6_clear_mutable_options);
#endif
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
......@@ -342,6 +346,15 @@ EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
EXPORT_SYMBOL_GPL(skb_ah_walk);
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
EXPORT_SYMBOL_GPL(skb_cow_data);
EXPORT_SYMBOL_GPL(pskb_put);
EXPORT_SYMBOL_GPL(skb_icv_walk);
EXPORT_SYMBOL_GPL(skb_to_sgvec);
#endif
#if defined (CONFIG_IPV6_MODULE) || defined (CONFIG_IP_SCTP_MODULE)
/* inet functions common to v4 and v6 */
......