Commit aedcae64 authored by Linus Torvalds

Merge master.kernel.org:/home/davem/BK/net-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 8309fa38 71f55998
......@@ -109,6 +109,9 @@ It's a good idea to avoid using lots of macros and use inlined functions
instead, as gcc does a good job with inlining, while excessive use of
macros can cause compilation problems on some platforms.
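For example, a minimal sketch (hypothetical helper, not part of the API) of why an inline function is usually preferable to the equivalent macro:

/* Preferred: a static inline helper is type-checked and gcc inlines
 * it just as well as a macro would.  (Hypothetical example only.)
 */
static inline unsigned int example_min(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Discouraged: the macro form evaluates its arguments more than once
 * and offers no type checking.
 */
#define EXAMPLE_MIN(a, b) ((a) < (b) ? (a) : (b))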
Also check the TODO list at the web site listed below to see what people
might already be working on.
BUGS
......
......@@ -413,6 +413,15 @@ L: dev-etrax@axis.com
W: http://developer.axis.com
S: Maintained
CRYPTO API
P: James Morris
M: jmorris@intercode.com.au
P: David S. Miller
M: davem@redhat.com
W: http://samba.org/~jamesm/crypto/
L: linux-kernel@vger.kernel.org
S: Maintained
CYBERPRO FB DRIVER
P: Russell King
M: rmk@arm.linux.org.uk
......
......@@ -74,19 +74,39 @@ static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
return -EINVAL;
}
static void crypto_init_ops(struct crypto_tfm *tfm)
static int crypto_init_ops(struct crypto_tfm *tfm)
{
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
crypto_init_cipher_ops(tfm);
return crypto_init_cipher_ops(tfm);
case CRYPTO_ALG_TYPE_DIGEST:
return crypto_init_digest_ops(tfm);
case CRYPTO_ALG_TYPE_COMP:
return crypto_init_compress_ops(tfm);
default:
break;
}
BUG();
return -EINVAL;
}
static void crypto_exit_ops(struct crypto_tfm *tfm)
{
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
crypto_exit_cipher_ops(tfm);
break;
case CRYPTO_ALG_TYPE_DIGEST:
crypto_init_digest_ops(tfm);
crypto_exit_digest_ops(tfm);
break;
case CRYPTO_ALG_TYPE_COMP:
crypto_init_compress_ops(tfm);
crypto_exit_compress_ops(tfm);
break;
default:
......@@ -110,6 +130,8 @@ struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
memset(tfm, 0, sizeof(*tfm));
memset(tfm, 0, sizeof(*tfm));
if (alg->cra_ctxsize) {
tfm->crt_ctx = kmalloc(alg->cra_ctxsize, GFP_KERNEL);
if (tfm->crt_ctx == NULL)
......@@ -128,8 +150,11 @@ struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
if (crypto_init_flags(tfm, flags))
goto out_free_work_block;
crypto_init_ops(tfm);
if (crypto_init_ops(tfm)) {
crypto_exit_ops(tfm);
goto out_free_ctx;
}
goto out;
out_free_work_block:
......
......@@ -234,27 +234,19 @@ static int nocrypt(struct crypto_tfm *tfm,
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
struct crypto_alg *alg = tfm->__crt_alg;
u32 mode = flags & CRYPTO_TFM_MODE_MASK;
tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
if (alg->cra_cipher.cia_ivsize && mode != CRYPTO_TFM_MODE_ECB) {
tfm->crt_cipher.cit_iv =
kmalloc(alg->cra_cipher.cia_ivsize, GFP_KERNEL);
if (tfm->crt_cipher.cit_iv == NULL)
return -ENOMEM;
} else
tfm->crt_cipher.cit_iv = NULL;
if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
tfm->crt_flags = CRYPTO_TFM_REQ_WEAK_KEY;
return 0;
}
void crypto_init_cipher_ops(struct crypto_tfm *tfm)
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
int ret = 0;
struct crypto_alg *alg = tfm->__crt_alg;
struct cipher_tfm *ops = &tfm->crt_cipher;
ops->cit_setkey = setkey;
......@@ -283,4 +275,20 @@ void crypto_init_cipher_ops(struct crypto_tfm *tfm)
default:
BUG();
}
if (alg->cra_cipher.cia_ivsize &&
ops->cit_mode != CRYPTO_TFM_MODE_ECB) {
ops->cit_iv = kmalloc(alg->cra_cipher.cia_ivsize, GFP_KERNEL);
if (ops->cit_iv == NULL)
ret = -ENOMEM;
}
return ret;
}
void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
if (tfm->crt_cipher.cit_iv)
kfree(tfm->crt_cipher.cit_iv);
}
......@@ -33,10 +33,14 @@ int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags)
return crypto_cipher_flags(flags) ? -EINVAL : 0;
}
void crypto_init_compress_ops(struct crypto_tfm *tfm)
int crypto_init_compress_ops(struct crypto_tfm *tfm)
{
struct compress_tfm *ops = &tfm->crt_compress;
ops->cot_compress = crypto_compress;
ops->cot_decompress = crypto_decompress;
return 0;
}
void crypto_exit_compress_ops(struct crypto_tfm *tfm)
{ }
......@@ -63,12 +63,19 @@ int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
return crypto_cipher_flags(flags) ? -EINVAL : 0;
}
void crypto_init_digest_ops(struct crypto_tfm *tfm)
int crypto_init_digest_ops(struct crypto_tfm *tfm)
{
struct digest_tfm *ops = &tfm->crt_digest;
ops->dit_init = init;
ops->dit_update = update;
ops->dit_final = final;
ops->dit_digest = digest;
ops->dit_init = init;
ops->dit_update = update;
ops->dit_final = final;
ops->dit_digest = digest;
return crypto_alloc_hmac_block(tfm);
}
void crypto_exit_digest_ops(struct crypto_tfm *tfm)
{
crypto_free_hmac_block(tfm);
}
......@@ -17,6 +17,7 @@
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include "internal.h"
......@@ -31,18 +32,39 @@ static void hash_key(struct crypto_tfm *tfm, u8 *key, unsigned int keylen)
}
int crypto_alloc_hmac_block(struct crypto_tfm *tfm)
{
int ret = 0;
BUG_ON(!crypto_tfm_alg_blocksize(tfm));
tfm->crt_digest.dit_hmac_block = kmalloc(crypto_tfm_alg_blocksize(tfm),
GFP_KERNEL);
if (tfm->crt_digest.dit_hmac_block == NULL)
ret = -ENOMEM;
return ret;
}
void crypto_free_hmac_block(struct crypto_tfm *tfm)
{
if (tfm->crt_digest.dit_hmac_block)
kfree(tfm->crt_digest.dit_hmac_block);
}
void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen)
{
unsigned int i;
struct scatterlist tmp;
char *ipad = tfm->crt_work_block;
char *ipad = tfm->crt_digest.dit_hmac_block;
if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
hash_key(tfm, key, *keylen);
*keylen = crypto_tfm_alg_digestsize(tfm);
}
memset(ipad, 0, crypto_tfm_alg_blocksize(tfm) + 1);
memset(ipad, 0, crypto_tfm_alg_blocksize(tfm));
memcpy(ipad, key, *keylen);
for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++)
......@@ -67,8 +89,8 @@ void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
{
unsigned int i;
struct scatterlist tmp;
char *opad = tfm->crt_work_block;
char *opad = tfm->crt_digest.dit_hmac_block;
if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
hash_key(tfm, key, *keylen);
*keylen = crypto_tfm_alg_digestsize(tfm);
......@@ -76,7 +98,7 @@ void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
crypto_digest_final(tfm, out);
memset(opad, 0, crypto_tfm_alg_blocksize(tfm) + 1);
memset(opad, 0, crypto_tfm_alg_blocksize(tfm));
memcpy(opad, key, *keylen);
for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++)
......
......@@ -52,13 +52,30 @@ static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name)
}
#endif
#ifdef CONFIG_CRYPTO_HMAC
int crypto_alloc_hmac_block(struct crypto_tfm *tfm);
void crypto_free_hmac_block(struct crypto_tfm *tfm);
#else
static inline int crypto_alloc_hmac_block(struct crypto_tfm *tfm)
{
return 0;
}
static inline void crypto_free_hmac_block(struct crypto_tfm *tfm)
{ }
#endif
int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags);
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags);
int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags);
void crypto_init_digest_ops(struct crypto_tfm *tfm);
void crypto_init_cipher_ops(struct crypto_tfm *tfm);
void crypto_init_compress_ops(struct crypto_tfm *tfm);
int crypto_init_digest_ops(struct crypto_tfm *tfm);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm);
void crypto_exit_digest_ops(struct crypto_tfm *tfm);
void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
void crypto_exit_compress_ops(struct crypto_tfm *tfm);
#endif /* _CRYPTO_INTERNAL_H */
......@@ -2,13 +2,13 @@
# Token Ring driver configuration
#
menu "Token Ring devices"
menu "Token Ring devices (depends on LLC=y)"
depends on NETDEVICES
# So far, we only have PCI, ISA, and MCA token ring devices
config TR
bool "Token Ring driver support"
depends on PCI || ISA || MCA
depends on (PCI || ISA || MCA) && LLC=y
help
Token Ring is IBM's way of communication on a local network; the
rest of the world uses Ethernet. To participate on a Token Ring
......
......@@ -144,6 +144,9 @@ struct digest_tfm {
void (*dit_final)(struct crypto_tfm *tfm, u8 *out);
void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg,
unsigned int nsg, u8 *out);
#ifdef CONFIG_CRYPTO_HMAC
void *dit_hmac_block;
#endif
};
struct compress_tfm {
......@@ -196,12 +199,7 @@ static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
static inline const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
if (alg->cra_module)
return alg->cra_module->name;
else
return NULL;
return module_name(tfm->__crt_alg->cra_module);
}
static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
......
......@@ -110,8 +110,8 @@ extern int init_netlink(void);
extern struct sock *netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len));
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
extern void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
__u32 group, int allocation);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
__u32 group, int allocation);
extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
extern int netlink_register_notifier(struct notifier_block *nb);
extern int netlink_unregister_notifier(struct notifier_block *nb);
......
......@@ -27,6 +27,7 @@
*
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -72,6 +73,7 @@ typedef enum {
SCTP_CMD_STRIKE, /* Mark a strike against a transport. */
SCTP_CMD_TRANSMIT, /* Transmit the outqueue. */
SCTP_CMD_HB_TIMERS_START, /* Start the heartbeat timers. */
SCTP_CMD_HB_TIMERS_UPDATE, /* Update the heartbeat timers. */
SCTP_CMD_TRANSPORT_RESET, /* Reset the status of a transport. */
SCTP_CMD_TRANSPORT_ON, /* Mark the transport as active. */
SCTP_CMD_REPORT_ERROR, /* Pass this error back out of the sm. */
......@@ -83,6 +85,7 @@ typedef enum {
SCTP_CMD_UPDATE_ASSOC, /* Update association information. */
SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
SCTP_CMD_SETUP_T2, /* Hi-level, setup T2-shutdown parms. */
SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. */
SCTP_CMD_LAST
} sctp_verb_t;
......
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 International Business Machines, Corp.
* Copyright (c) 2001-2002 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
*
*
* This file is part of the SCTP kernel reference Implementation
*
* The base lksctp header.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
*
* The base lksctp header.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* The SCTP reference implementation is distributed in the hope that it
*
* The SCTP reference implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Xingang Guo <xingang.guo@intel.com>
* Jon Grimm <jgrimm@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
*
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
......@@ -52,10 +52,10 @@
* structs
* prototypes
* macros, externs, and inlines
*
* Move test_frame specific items out of the kernel headers
*
* Move test_frame specific items out of the kernel headers
* and into the test frame headers. This is not perfect in any sense
* and will continue to evolve.
* and will continue to evolve.
*/
......@@ -78,7 +78,7 @@
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#include <net/ip6_route.h>
#endif
#endif
#include <asm/uaccess.h>
#include <asm/page.h>
......@@ -105,19 +105,19 @@
#endif
/* Certain internal static functions need to be exported when
/* Certain internal static functions need to be exported when
* compiled into the test frame.
*/
#ifndef SCTP_STATIC
#define SCTP_STATIC static
#endif
/*
* Function declarations.
/*
* Function declarations.
*/
/*
* sctp_protocol.c
* sctp_protocol.c
*/
extern sctp_protocol_t sctp_proto;
extern struct sock *sctp_get_ctl_sock(void);
......@@ -142,7 +142,7 @@ extern int sctp_primitive_ASSOCIATE(sctp_association_t *, void *arg);
extern int sctp_primitive_SHUTDOWN(sctp_association_t *, void *arg);
extern int sctp_primitive_ABORT(sctp_association_t *, void *arg);
extern int sctp_primitive_SEND(sctp_association_t *, void *arg);
extern int sctp_primitive_REQUESTHEARTBEAT(sctp_association_t *, void *arg);
/*
* sctp_crc32c.c
......@@ -418,6 +418,19 @@ static inline size_t get_user_iov_size(struct iovec *iov, int iovlen)
return retval;
}
/* Walk through a list of TLV parameters. Don't trust the
* individual parameter lengths and instead depend on
* the chunk length to indicate when to stop. Make sure
* there is room for a param header too.
*/
#define sctp_walk_params(pos, chunk, member)\
_sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
#define _sctp_walk_params(pos, chunk, end, member)\
for (pos.v = chunk->member;\
pos.v <= (void *)chunk + end - sizeof(sctp_paramhdr_t) &&\
pos.v <= (void *)chunk + end - WORD_ROUND(ntohs(pos.p->length)); \
pos.v += WORD_ROUND(ntohs(pos.p->length)))
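A usage sketch (local variable names hypothetical); the INIT lookup code later in this patch walks parameters the same way:

union sctp_params params;
sctp_init_chunk_t *init = (sctp_init_chunk_t *)chunk_data; /* hypothetical */

/* Visit every TLV parameter in the INIT chunk; the walk is bounded by
 * the chunk length rather than the individual parameter lengths.
 */
sctp_walk_params(params, init, init_hdr.params) {
	if (SCTP_PARAM_IPV4_ADDRESS == params.p->type ||
	    SCTP_PARAM_IPV6_ADDRESS == params.p->type) {
		/* an embedded address parameter is at params.addr */
	}
}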
/* Round an int up to the next multiple of 4. */
#define WORD_ROUND(s) (((s)+3)&~3)
......@@ -460,6 +473,26 @@ static inline sctp_protocol_t *sctp_get_protocol(void)
return &sctp_proto;
}
/* Convert from an IP version number to an Address Family symbol. */
static inline int ipver2af(__u8 ipver)
{
int family;
switch (ipver) {
case 4:
family = AF_INET;
break;
case 6:
family = AF_INET6;
break;
default:
family = 0;
break;
};
return family;
}
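The receive path in this patch uses the helper to pick the per-address-family function table, for example:

/* Map the IP header version to an address family and look up the
 * matching helpers, as sctp_rcv() does elsewhere in this patch.
 */
af = sctp_get_af_specific(ipver2af(skb->nh.iph->version));
if (unlikely(!af))
	goto bad_packet;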
/* Warning: The following hash functions assume a power of two 'size'. */
/* This is the hash function for the SCTP port hash table. */
static inline int sctp_phashfn(__u16 lport)
......
......@@ -156,6 +156,7 @@ sctp_state_fn_t sctp_sf_shutdown_ack_sent_prm_abort;
sctp_state_fn_t sctp_sf_error_closed;
sctp_state_fn_t sctp_sf_error_shutdown;
sctp_state_fn_t sctp_sf_ignore_primitive;
sctp_state_fn_t sctp_sf_do_prm_requestheartbeat;
/* Prototypes for other event state functions. */
sctp_state_fn_t sctp_sf_do_9_2_start_shutdown;
......@@ -205,9 +206,6 @@ sctp_association_t *sctp_make_temp_asoc(const sctp_endpoint_t *,
sctp_chunk_t *,
const int priority);
__u32 sctp_generate_verification_tag(void);
sctpParam_t sctp_get_my_addrs_raw(const sctp_association_t *,
const int priority, int *addrs_len);
void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag);
/* Prototypes for chunk-building functions. */
......@@ -333,10 +331,10 @@ __u32 sctp_generate_tag(const sctp_endpoint_t *);
__u32 sctp_generate_tsn(const sctp_endpoint_t *);
/* 4th level prototypes */
void sctp_param2sockaddr(sockaddr_storage_t *addr, sctp_addr_param_t *,
void sctp_param2sockaddr(union sctp_addr *addr, sctp_addr_param_t *,
__u16 port);
int sctp_addr2sockaddr(const sctpParam_t, sockaddr_storage_t *);
int sockaddr2sctp_addr(const sockaddr_storage_t *, sctp_addr_param_t *);
int sctp_addr2sockaddr(const union sctp_params, union sctp_addr *);
int sockaddr2sctp_addr(const union sctp_addr *, sctp_addr_param_t *);
/* Extern declarations for major data structures. */
sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t, sctp_state_t);
......
......@@ -195,6 +195,7 @@ struct xfrm_policy
/* This lock only affects elements except for entry. */
rwlock_t lock;
atomic_t refcnt;
struct timer_list timer;
u32 priority;
u32 index;
......
......@@ -266,14 +266,14 @@ config VLAN_8021Q
tristate "802.1Q VLAN Support"
config LLC
tristate "ANSI/IEEE 802.2 Data link layer protocol (IPX, Appletalk)"
tristate "ANSI/IEEE 802.2 - aka LLC (IPX, Appletalk, Token Ring)"
help
This is a Logical Link Layer protocol used for Appletalk, IPX and in
the future by NetBEUI and by the linux-sna project. It originally
came from Procom Inc. that released the code for 2.0.36 and was
ported to 2.{4,5}. Select this if you want to have support for
those protocols or if you want to have the sockets interface for
LLC.
This is a Logical Link Layer protocol used for Appletalk, IPX,
Token Ring devices, the linux-sna.org project and in the future by
NetBEUI. It originally came from Procom Inc. that released the code
for 2.0.36 and was heavily modified to work with 2.{4,5}.
Select this if you want to have support for those protocols or if
you want to have the sockets interface for LLC.
config LLC_UI
......
......@@ -234,11 +234,12 @@ int ah_output(struct sk_buff *skb)
x->curlft.packets++;
spin_unlock_bh(&x->lock);
if ((skb->dst = dst_pop(dst)) == NULL)
goto error;
goto error_nolock;
return NET_XMIT_BYPASS;
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(skb);
return err;
}
......
......@@ -435,11 +435,12 @@ int esp_output(struct sk_buff *skb)
x->curlft.packets++;
spin_unlock_bh(&x->lock);
if ((skb->dst = dst_pop(dst)) == NULL)
goto error;
goto error_nolock;
return NET_XMIT_BYPASS;
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(skb);
return err;
}
......
......@@ -204,6 +204,50 @@ void xfrm_put_type(struct xfrm_type *type)
__MOD_DEC_USE_COUNT(type->owner);
}
static inline unsigned long make_jiffies(long secs)
{
if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
return MAX_SCHEDULE_TIMEOUT-1;
else
return secs*HZ;
}
static void xfrm_policy_timer(unsigned long data)
{
struct xfrm_policy *xp = (struct xfrm_policy*)data;
unsigned long now = (unsigned long)xtime.tv_sec;
long next = LONG_MAX;
if (xp->dead)
goto out;
if (xp->lft.hard_add_expires_seconds) {
long tmo = xp->lft.hard_add_expires_seconds +
xp->curlft.add_time - now;
if (tmo <= 0)
goto expired;
if (tmo < next)
next = tmo;
}
if (next != LONG_MAX &&
!mod_timer(&xp->timer, jiffies + make_jiffies(next)))
atomic_inc(&xp->refcnt);
out:
xfrm_pol_put(xp);
return;
expired:
xfrm_pol_put(xp);
/* Not 100% correct. id can be recycled in theory */
xp = xfrm_policy_byid(0, xp->index, 1);
if (xp) {
xfrm_policy_kill(xp);
xfrm_pol_put(xp);
}
}
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
* SPD calls.
......@@ -219,6 +263,9 @@ struct xfrm_policy *xfrm_policy_alloc(int gfp)
memset(policy, 0, sizeof(struct xfrm_policy));
atomic_set(&policy->refcnt, 1);
policy->lock = RW_LOCK_UNLOCKED;
init_timer(&policy->timer);
policy->timer.data = (unsigned long)policy;
policy->timer.function = xfrm_policy_timer;
}
return policy;
}
......@@ -233,6 +280,9 @@ void __xfrm_policy_destroy(struct xfrm_policy *policy)
if (policy->bundles)
BUG();
if (del_timer(&policy->timer))
BUG();
kfree(policy);
}
......@@ -255,6 +305,9 @@ void xfrm_policy_kill(struct xfrm_policy *policy)
dst_free(dst);
}
if (del_timer(&policy->timer))
atomic_dec(&policy->refcnt);
out:
write_unlock_bh(&policy->lock);
}
......@@ -302,6 +355,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
policy->index = pol ? pol->index : xfrm_gen_index(dir);
policy->curlft.add_time = (unsigned long)xtime.tv_sec;
policy->curlft.use_time = 0;
if (policy->lft.hard_add_expires_seconds &&
!mod_timer(&policy->timer, jiffies + HZ))
atomic_inc(&policy->refcnt);
write_unlock_bh(&xfrm_policy_lock);
if (pol) {
......@@ -380,7 +436,7 @@ int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
int count = 0;
int error = 0;
read_lock(&xfrm_policy_lock);
read_lock_bh(&xfrm_policy_lock);
for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
count++;
......@@ -400,7 +456,7 @@ int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
}
out:
read_unlock(&xfrm_policy_lock);
read_unlock_bh(&xfrm_policy_lock);
return error;
}
......@@ -411,7 +467,7 @@ struct xfrm_policy *xfrm_policy_lookup(int dir, struct flowi *fl)
{
struct xfrm_policy *pol;
read_lock(&xfrm_policy_lock);
read_lock_bh(&xfrm_policy_lock);
for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
struct xfrm_selector *sel = &pol->selector;
......@@ -420,7 +476,7 @@ struct xfrm_policy *xfrm_policy_lookup(int dir, struct flowi *fl)
break;
}
}
read_unlock(&xfrm_policy_lock);
read_unlock_bh(&xfrm_policy_lock);
return pol;
}
......@@ -428,14 +484,14 @@ struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi
{
struct xfrm_policy *pol;
read_lock(&xfrm_policy_lock);
read_lock_bh(&xfrm_policy_lock);
if ((pol = sk->policy[dir]) != NULL) {
if (xfrm4_selector_match(&pol->selector, fl))
atomic_inc(&pol->refcnt);
else
pol = NULL;
}
read_unlock(&xfrm_policy_lock);
read_unlock_bh(&xfrm_policy_lock);
return pol;
}
......@@ -727,8 +783,7 @@ int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
return 0;
}
if (!policy->curlft.use_time)
policy->curlft.use_time = (unsigned long)xtime.tv_sec;
policy->curlft.use_time = (unsigned long)xtime.tv_sec;
switch (policy->action) {
case XFRM_POLICY_BLOCK:
......@@ -936,8 +991,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
if (!pol)
return 1;
if (!pol->curlft.use_time)
pol->curlft.use_time = (unsigned long)xtime.tv_sec;
pol->curlft.use_time = (unsigned long)xtime.tv_sec;
if (pol->action == XFRM_POLICY_ALLOW) {
if (pol->xfrm_nr != 0) {
......
......@@ -28,7 +28,7 @@ DECLARE_WAIT_QUEUE_HEAD(km_waitq);
static void __xfrm_state_delete(struct xfrm_state *x);
unsigned long make_jiffies(long secs)
static inline unsigned long make_jiffies(long secs)
{
if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
return MAX_SCHEDULE_TIMEOUT-1;
......@@ -92,7 +92,14 @@ static void xfrm_timer_handler(unsigned long data)
goto out;
expired:
km_expired(x);
if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
x->km.state = XFRM_STATE_EXPIRED;
wake_up(&km_waitq);
next = 2;
goto resched;
}
if (x->id.spi != 0)
km_expired(x);
__xfrm_state_delete(x);
out:
......@@ -298,11 +305,13 @@ xfrm_state_find(u32 daddr, u32 saddr, struct flowi *fl, struct xfrm_tmpl *tmpl,
x->km.state = XFRM_STATE_DEAD;
xfrm_state_put(x);
x = NULL;
error = 1;
}
}
spin_unlock_bh(&xfrm_state_lock);
if (!x)
*err = acquire_in_progress ? -EAGAIN : -ENOMEM;
*err = acquire_in_progress ? -EAGAIN :
(error ? -ESRCH : -ENOMEM);
return x;
}
......@@ -612,6 +621,7 @@ void km_expired(struct xfrm_state *x)
list_for_each_entry(km, &xfrm_km_list, list)
km->notify(x, 1);
read_unlock(&xfrm_km_lock);
wake_up(&km_waitq);
}
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
......
......@@ -910,9 +910,8 @@ static int xfrm_send_notify(struct xfrm_state *x, int hard)
BUG();
NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE;
netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
return 0;
return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
}
/* XXX Make this xfrm_state.c:xfrm_get_acqseq() */
......@@ -971,9 +970,8 @@ static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
BUG();
NETLINK_CB(skb).dst_groups = XFRMGRP_ACQUIRE;
netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_ACQUIRE, GFP_ATOMIC);
return 0;
return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_ACQUIRE, GFP_ATOMIC);
}
/* User gives us xfrm_user_policy_info followed by an array of 0
......
......@@ -196,9 +196,11 @@ static int pfkey_release(struct socket *sock)
return 0;
}
static void pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
int allocation, struct sock *sk)
static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
int allocation, struct sock *sk)
{
int err = -ENOBUFS;
sock_hold(sk);
if (*skb2 == NULL) {
if (atomic_read(&skb->users) != 1) {
......@@ -215,9 +217,11 @@ static void pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
skb_queue_tail(&sk->receive_queue, *skb2);
sk->data_ready(sk, (*skb2)->len);
*skb2 = NULL;
err = 0;
}
}
sock_put(sk);
return err;
}
/* Send SKB to all pfkey sockets matching selected criteria. */
......@@ -225,21 +229,23 @@ static void pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
#define BROADCAST_ONE 1
#define BROADCAST_REGISTERED 2
#define BROADCAST_PROMISC_ONLY 4
static void pfkey_broadcast(struct sk_buff *skb, int allocation,
int broadcast_flags, struct sock *one_sk)
static int pfkey_broadcast(struct sk_buff *skb, int allocation,
int broadcast_flags, struct sock *one_sk)
{
struct sock *sk;
struct sk_buff *skb2 = NULL;
int err = -ESRCH;
/* XXX Do we need something like netlink_overrun? I think
* XXX PF_KEY socket apps will not mind current behavior.
*/
if (!skb)
return;
return -ENOMEM;
pfkey_lock_table();
for (sk = pfkey_table; sk; sk = sk->next) {
struct pfkey_opt *pfk = pfkey_sk(sk);
int err2;
/* Yes, it means that if you are meant to receive this
* pfkey message you receive it twice as promiscuous
......@@ -261,16 +267,22 @@ static void pfkey_broadcast(struct sk_buff *skb, int allocation,
continue;
}
pfkey_broadcast_one(skb, &skb2, allocation, sk);
err2 = pfkey_broadcast_one(skb, &skb2, allocation, sk);
/* Error is cleared after successful sending to at least one
* registered KM */
if ((broadcast_flags & BROADCAST_REGISTERED) && err)
err = err2;
}
pfkey_unlock_table();
if (one_sk != NULL)
pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
if (skb2)
kfree_skb(skb2);
kfree_skb(skb);
return err;
}
static inline void pfkey_hdr_dup(struct sadb_msg *new, struct sadb_msg *orig)
......@@ -1101,8 +1113,12 @@ static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, struct sadb_msg *
if (x == NULL)
return 0;
if (x->km.state == XFRM_STATE_ACQ)
xfrm_state_delete(x);
spin_lock_bh(&x->lock);
if (x->km.state == XFRM_STATE_ACQ) {
x->km.state = XFRM_STATE_ERROR;
wake_up(&km_waitq);
}
spin_unlock_bh(&x->lock);
xfrm_state_put(x);
return 0;
}
......@@ -1783,14 +1799,10 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
struct sk_buff *out_skb;
struct sadb_msg *out_hdr;
if (!ext_hdrs[SADB_X_EXT_POLICY-1])
if ((pol = ext_hdrs[SADB_X_EXT_POLICY-1]) == NULL)
return -EINVAL;
pol = ext_hdrs[SADB_X_EXT_POLICY-1];
if (!pol->sadb_x_policy_dir || pol->sadb_x_policy_dir >= IPSEC_DIR_MAX)
return -EINVAL;
xp = xfrm_policy_byid(pol->sadb_x_policy_dir-1, pol->sadb_x_policy_id,
xp = xfrm_policy_byid(0, pol->sadb_x_policy_id,
hdr->sadb_msg_type == SADB_X_SPDDELETE2);
if (xp == NULL)
return -ENOENT;
......@@ -2142,9 +2154,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
else if (x->id.proto == IPPROTO_ESP)
dump_esp_combs(skb, t);
pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL);
return 0;
return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL);
}
static struct xfrm_policy *pfkey_compile_policy(int opt, u8 *data, int len, int *dir)
......
......@@ -181,22 +181,22 @@ int llc_sk_init(struct sock* sk)
llc->inc_cntr = llc->dec_cntr = 2;
llc->dec_step = llc->connect_step = 1;
init_timer(&llc->ack_timer);
init_timer(&llc->ack_timer.timer);
llc->ack_timer.expire = LLC_ACK_TIME;
llc->ack_timer.timer.data = (unsigned long)sk;
llc->ack_timer.timer.function = llc_conn_ack_tmr_cb;
init_timer(&llc->pf_cycle_timer);
init_timer(&llc->pf_cycle_timer.timer);
llc->pf_cycle_timer.expire = LLC_P_TIME;
llc->pf_cycle_timer.timer.data = (unsigned long)sk;
llc->pf_cycle_timer.timer.function = llc_conn_pf_cycle_tmr_cb;
init_timer(&llc->rej_sent_timer);
init_timer(&llc->rej_sent_timer.timer);
llc->rej_sent_timer.expire = LLC_REJ_TIME;
llc->rej_sent_timer.timer.data = (unsigned long)sk;
llc->rej_sent_timer.timer.function = llc_conn_rej_tmr_cb;
init_timer(&llc->busy_state_timer);
init_timer(&llc->busy_state_timer.timer);
llc->busy_state_timer.expire = LLC_BUSY_TIME;
llc->busy_state_timer.timer.data = (unsigned long)sk;
llc->busy_state_timer.timer.function = llc_conn_busy_tmr_cb;
......
......@@ -503,13 +503,13 @@ static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff
return -1;
}
void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
u32 group, int allocation)
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
u32 group, int allocation)
{
struct sock *sk;
struct sk_buff *skb2 = NULL;
int protocol = ssk->protocol;
int failure = 0;
int failure = 0, delivered = 0;
/* While we sleep in clone, do not allow to change socket list */
......@@ -544,8 +544,10 @@ void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
failure = 1;
} else if (netlink_broadcast_deliver(sk, skb2)) {
netlink_overrun(sk);
} else
} else {
delivered = 1;
skb2 = NULL;
}
sock_put(sk);
}
......@@ -554,6 +556,12 @@ void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
if (skb2)
kfree_skb(skb2);
kfree_skb(skb);
if (delivered)
return 0;
if (failure)
return -ENOBUFS;
return -ESRCH;
}
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
......
......@@ -283,6 +283,7 @@ extern int (*dlci_ioctl_hook)(unsigned int, void *);
EXPORT_SYMBOL(dlci_ioctl_hook);
#endif
EXPORT_SYMBOL(km_waitq);
EXPORT_SYMBOL(xfrm_cfg_sem);
EXPORT_SYMBOL(xfrm_policy_alloc);
EXPORT_SYMBOL(__xfrm_policy_destroy);
......
/* SCTP kernel reference Implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 International Business Machines, Corp.
* Copyright (c) 2001-2002 International Business Machines, Corp.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
......@@ -237,13 +237,14 @@ void sctp_endpoint_put(sctp_endpoint_t *ep)
/* Is this the endpoint we are looking for? */
sctp_endpoint_t *sctp_endpoint_is_match(sctp_endpoint_t *ep,
const sockaddr_storage_t *laddr)
const union sctp_addr *laddr)
{
sctp_endpoint_t *retval;
sctp_read_lock(&ep->base.addr_lock);
if (ep->base.bind_addr.port == laddr->v4.sin_port) {
if (sctp_bind_addr_has_addr(&ep->base.bind_addr, laddr)) {
if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
sctp_sk(ep->base.sk))) {
retval = ep;
goto out;
}
......@@ -262,7 +263,7 @@ sctp_endpoint_t *sctp_endpoint_is_match(sctp_endpoint_t *ep,
*/
sctp_association_t *__sctp_endpoint_lookup_assoc(
const sctp_endpoint_t *endpoint,
const sockaddr_storage_t *paddr,
const union sctp_addr *paddr,
sctp_transport_t **transport)
{
int rport;
......@@ -289,7 +290,7 @@ sctp_association_t *__sctp_endpoint_lookup_assoc(
/* Lookup association on an endpoint based on a peer address. BH-safe. */
sctp_association_t *sctp_endpoint_lookup_assoc(const sctp_endpoint_t *ep,
const sockaddr_storage_t *paddr,
const union sctp_addr *paddr,
sctp_transport_t **transport)
{
sctp_association_t *asoc;
......@@ -301,6 +302,30 @@ sctp_association_t *sctp_endpoint_lookup_assoc(const sctp_endpoint_t *ep,
return asoc;
}
/* Look for any peeled off association from the endpoint that matches the
* given peer address.
*/
int sctp_endpoint_is_peeled_off(sctp_endpoint_t *ep,
const union sctp_addr *paddr)
{
struct list_head *pos;
struct sockaddr_storage_list *addr;
sctp_bind_addr_t *bp;
sctp_read_lock(&ep->base.addr_lock);
bp = &ep->base.bind_addr;
list_for_each(pos, &bp->address_list) {
addr = list_entry(pos, struct sockaddr_storage_list, list);
if (sctp_has_association(&addr->a, paddr)) {
sctp_read_unlock(&ep->base.addr_lock);
return 1;
}
}
sctp_read_unlock(&ep->base.addr_lock);
return 0;
}
/* Do delayed input processing. This is scheduled by sctp_rcv().
* This may be called on BH or task time.
*/
......@@ -316,13 +341,13 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep)
int error = 0;
if (ep->base.dead)
goto out;
return;
asoc = NULL;
inqueue = &ep->base.inqueue;
sk = ep->base.sk;
while (NULL != (chunk = sctp_pop_inqueue(inqueue))) {
while (NULL != (chunk = sctp_pop_inqueue(inqueue))) {
subtype.chunk = chunk->chunk_hdr->type;
/* We might have grown an association since last we
......@@ -350,25 +375,16 @@ static void sctp_endpoint_bh_rcv(sctp_endpoint_t *ep)
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
/* FIX ME We really would rather NOT have to use
* GFP_ATOMIC.
*/
error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, state,
ep, asoc, chunk, GFP_ATOMIC);
if (error != 0)
goto err_out;
if (error && chunk)
chunk->pdiscard = 1;
/* Check to see if the endpoint is freed in response to
* the incoming chunk. If so, get out of the while loop.
*/
if (!sctp_sk(sk)->ep)
goto out;
break;
}
err_out:
/* Is this the right way to pass errors up to the ULP? */
if (error)
ep->base.sk->err = -error;
out:
}
......@@ -60,64 +60,11 @@
/* Forward declarations for internal helpers. */
static int sctp_rcv_ootb(struct sk_buff *);
sctp_association_t *__sctp_rcv_lookup(struct sk_buff *skb,
const sockaddr_storage_t *laddr,
const sockaddr_storage_t *paddr,
const union sctp_addr *laddr,
const union sctp_addr *paddr,
sctp_transport_t **transportp);
sctp_endpoint_t *__sctp_rcv_lookup_endpoint(const sockaddr_storage_t *laddr);
sctp_endpoint_t *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr);
/* Initialize a sockaddr_storage from an incoming skb.
* FIXME: This belongs with AF specific sctp_func_t. --jgrimm
*/
static sockaddr_storage_t *sctp_sockaddr_storage_init(sockaddr_storage_t *addr,
const struct sk_buff *skb,
int is_saddr)
{
sockaddr_storage_t *ret = NULL;
void *to, *saddr, *daddr;
__u16 *port;
size_t len;
struct sctphdr *sh;
switch (skb->nh.iph->version) {
case 4:
to = &addr->v4.sin_addr.s_addr;
port = &addr->v4.sin_port;
saddr = &skb->nh.iph->saddr;
daddr = &skb->nh.iph->daddr;
len = sizeof(struct in_addr);
addr->v4.sin_family = AF_INET;
break;
case 6:
SCTP_V6(
to = &addr->v6.sin6_addr;
port = &addr->v6.sin6_port;
saddr = &skb->nh.ipv6h->saddr;
daddr = &skb->nh.ipv6h->daddr;
len = sizeof(struct in6_addr);
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_flowinfo = 0; /* FIXME */
addr->v6.sin6_scope_id = 0; /* FIXME */
break;
)
default:
goto out;
};
sh = (struct sctphdr *) skb->h.raw;
if (is_saddr) {
*port = ntohs(sh->source);
memcpy(to, saddr, len);
} else {
*port = ntohs(sh->dest);
memcpy(to, daddr, len);
}
ret = addr;
out:
return ret;
}
/* Calculate the SCTP checksum of an SCTP packet. */
static inline int sctp_rcv_checksum(struct sk_buff *skb)
......@@ -147,8 +94,9 @@ int sctp_rcv(struct sk_buff *skb)
sctp_transport_t *transport = NULL;
sctp_chunk_t *chunk;
struct sctphdr *sh;
sockaddr_storage_t src;
sockaddr_storage_t dest;
union sctp_addr src;
union sctp_addr dest;
struct sctp_func *af;
int ret = 0;
if (skb->pkt_type!=PACKET_HOST)
......@@ -163,10 +111,15 @@ int sctp_rcv(struct sk_buff *skb)
if (sctp_rcv_checksum(skb) < 0)
goto bad_packet;
skb_pull(skb, sizeof(struct sctphdr));
skb_pull(skb, sizeof(struct sctphdr));
af = sctp_get_af_specific(ipver2af(skb->nh.iph->version));
if (unlikely(!af))
goto bad_packet;
sctp_sockaddr_storage_init(&src, skb, 1);
sctp_sockaddr_storage_init(&dest, skb, 0);
/* Initialize local addresses for lookups. */
af->from_skb(&src, skb, 1);
af->from_skb(&dest, skb, 0);
/* If the packet is to or from a non-unicast address,
* silently discard the packet.
......@@ -179,7 +132,7 @@ int sctp_rcv(struct sk_buff *skb)
* IP broadcast addresses cannot be used in an SCTP transport
* address."
*/
if (!sctp_addr_is_valid(&src) || !sctp_addr_is_valid(&dest))
if (!af->addr_valid(&src) || !af->addr_valid(&dest))
goto discard_it;
asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);
......@@ -219,7 +172,7 @@ int sctp_rcv(struct sk_buff *skb)
chunk->sctp_hdr = sh;
/* Set the source and destination addresses of the incoming chunk. */
sctp_init_addrs(chunk);
sctp_init_addrs(chunk, &src, &dest);
/* Remember where we came from. */
chunk->transport = transport;
......@@ -431,7 +384,7 @@ void sctp_unhash_endpoint(sctp_endpoint_t *ep)
}
/* Look up an endpoint. */
sctp_endpoint_t *__sctp_rcv_lookup_endpoint(const sockaddr_storage_t *laddr)
sctp_endpoint_t *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
{
sctp_hashbucket_t *head;
sctp_endpoint_common_t *epb;
......@@ -523,8 +476,8 @@ void __sctp_unhash_established(sctp_association_t *asoc)
}
/* Look up an association. */
sctp_association_t *__sctp_lookup_association(const sockaddr_storage_t *laddr,
const sockaddr_storage_t *paddr,
sctp_association_t *__sctp_lookup_association(const union sctp_addr *laddr,
const union sctp_addr *paddr,
sctp_transport_t **transportp)
{
sctp_hashbucket_t *head;
......@@ -559,8 +512,8 @@ sctp_association_t *__sctp_lookup_association(const sockaddr_storage_t *laddr,
}
/* Look up an association. BH-safe. */
sctp_association_t *sctp_lookup_association(const sockaddr_storage_t *laddr,
const sockaddr_storage_t *paddr,
sctp_association_t *sctp_lookup_association(const union sctp_addr *laddr,
const union sctp_addr *paddr,
sctp_transport_t **transportp)
{
sctp_association_t *asoc;
......@@ -568,13 +521,13 @@ sctp_association_t *sctp_lookup_association(const sockaddr_storage_t *laddr,
sctp_local_bh_disable();
asoc = __sctp_lookup_association(laddr, paddr, transportp);
sctp_local_bh_enable();
return asoc;
}
/* Is there an association matching the given local and peer addresses? */
int sctp_has_association(const sockaddr_storage_t *laddr,
const sockaddr_storage_t *paddr)
int sctp_has_association(const union sctp_addr *laddr,
const union sctp_addr *paddr)
{
sctp_association_t *asoc;
sctp_transport_t *transport;
......@@ -606,21 +559,19 @@ int sctp_has_association(const sockaddr_storage_t *laddr,
* in certain circumstances.
*
*/
static sctp_association_t *__sctp_rcv_initack_lookup(struct sk_buff *skb,
const sockaddr_storage_t *laddr, sctp_transport_t **transportp)
static sctp_association_t *__sctp_rcv_init_lookup(struct sk_buff *skb,
const union sctp_addr *laddr, sctp_transport_t **transportp)
{
sctp_association_t *asoc;
sockaddr_storage_t addr;
sockaddr_storage_t *paddr = &addr;
union sctp_addr addr;
union sctp_addr *paddr = &addr;
struct sctphdr *sh = (struct sctphdr *) skb->h.raw;
sctp_chunkhdr_t *ch;
__u8 *ch_end, *data;
sctp_paramhdr_t *parm;
union sctp_params params;
sctp_init_chunk_t *init;
ch = (sctp_chunkhdr_t *) skb->data;
ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length));
/* If this is INIT/INIT-ACK look inside the chunk too. */
switch (ch->type) {
case SCTP_CID_INIT:
......@@ -646,24 +597,17 @@ static sctp_association_t *__sctp_rcv_initack_lookup(struct sk_buff *skb,
/* Find the start of the TLVs and the end of the chunk. This is
* the region we search for address parameters.
*/
data = skb->data + sizeof(sctp_init_chunk_t);
/* See sctp_process_init() for how to go thru TLVs. */
while (data < ch_end) {
parm = (sctp_paramhdr_t *)data;
if (!parm->length)
break;
init = (sctp_init_chunk_t *)skb->data;
data += WORD_ROUND(ntohs(parm->length));
/* Walk the parameters looking for embedded addresses. */
sctp_walk_params(params, init, init_hdr.params) {
/* Note: Ignoring hostname addresses. */
if ((SCTP_PARAM_IPV4_ADDRESS != parm->type) &&
(SCTP_PARAM_IPV6_ADDRESS != parm->type))
if ((SCTP_PARAM_IPV4_ADDRESS != params.p->type) &&
(SCTP_PARAM_IPV6_ADDRESS != params.p->type))
continue;
sctp_param2sockaddr(paddr, (sctp_addr_param_t *)parm,
ntohs(sh->source));
sctp_param2sockaddr(paddr, params.addr, ntohs(sh->source));
asoc = __sctp_lookup_association(laddr, paddr, transportp);
if (asoc)
return asoc;
......@@ -674,20 +618,20 @@ static sctp_association_t *__sctp_rcv_initack_lookup(struct sk_buff *skb,
/* Lookup an association for an inbound skb. */
sctp_association_t *__sctp_rcv_lookup(struct sk_buff *skb,
const sockaddr_storage_t *paddr,
const sockaddr_storage_t *laddr,
const union sctp_addr *paddr,
const union sctp_addr *laddr,
sctp_transport_t **transportp)
{
sctp_association_t *asoc;
asoc = __sctp_lookup_association(laddr, paddr, transportp);
/* Further lookup for INIT-ACK packet.
/* Further lookup for INIT/INIT-ACK packets.
* SCTP Implementors Guide, 2.18 Handling of address
* parameters within the INIT or INIT-ACK.
*/
if (!asoc)
asoc = __sctp_rcv_initack_lookup(skb, laddr, transportp);
asoc = __sctp_rcv_init_lookup(skb, laddr, transportp);
return asoc;
}
......
......@@ -172,16 +172,16 @@ static inline int sctp_v6_xmit(struct sk_buff *skb)
/* Returns the dst cache entry for the given source and destination ip
* addresses.
*/
struct dst_entry *sctp_v6_get_dst(sockaddr_storage_t *daddr,
sockaddr_storage_t *saddr)
struct dst_entry *sctp_v6_get_dst(union sctp_addr *daddr,
union sctp_addr *saddr)
{
struct dst_entry *dst;
struct flowi fl = { .nl_u = { .ip6_u = { .daddr = &daddr->v6.sin6_addr,
} } };
struct flowi fl = {
.nl_u = { .ip6_u = { .daddr = &daddr->v6.sin6_addr, } } };
SCTP_DEBUG_PRINTK("%s: DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ",
__FUNCTION__, NIP6(fl.fl6_dst));
__FUNCTION__, NIP6(fl.fl6_dst));
if (saddr) {
fl.fl6_src = &saddr->v6.sin6_addr;
......@@ -206,12 +206,147 @@ struct dst_entry *sctp_v6_get_dst(sockaddr_storage_t *daddr,
return dst;
}
/* Check if the dst entry's source addr matches the given source addr. */
int sctp_v6_cmp_saddr(struct dst_entry *dst, sockaddr_storage_t *saddr)
/* Make a copy of all potential local addresses. */
static void sctp_v6_copy_addrlist(struct list_head *addrlist,
struct net_device *dev)
{
struct inet6_dev *in6_dev;
struct inet6_ifaddr *ifp;
struct sockaddr_storage_list *addr;
read_lock(&addrconf_lock);
if ((in6_dev = __in6_dev_get(dev)) == NULL) {
read_unlock(&addrconf_lock);
return;
}
read_lock(&in6_dev->lock);
for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) {
/* Add the address to the local list. */
addr = t_new(struct sockaddr_storage_list, GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
addr->a.v6.sin6_port = 0;
addr->a.v6.sin6_addr = ifp->addr;
INIT_LIST_HEAD(&addr->list);
list_add_tail(&addr->list, addrlist);
}
}
read_unlock(&in6_dev->lock);
read_unlock(&addrconf_lock);
}
/* Initialize a sockaddr_storage from an incoming skb. */
static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
int is_saddr)
{
void *from;
__u16 *port;
struct sctphdr *sh;
port = &addr->v6.sin6_port;
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_flowinfo = 0; /* FIXME */
addr->v6.sin6_scope_id = 0; /* FIXME */
sh = (struct sctphdr *) skb->h.raw;
if (is_saddr) {
*port = ntohs(sh->source);
from = &skb->nh.ipv6h->saddr;
} else {
*port = ntohs(sh->dest);
from = &skb->nh.ipv6h->daddr;
}
ipv6_addr_copy(&addr->v6.sin6_addr, from);
}
/* Initialize a sctp_addr from a dst_entry. */
static void sctp_v6_dst_saddr(union sctp_addr *addr, struct dst_entry *dst)
{
struct rt6_info *rt = (struct rt6_info *)dst;
addr->sa.sa_family = AF_INET6;
ipv6_addr_copy(&addr->v6.sin6_addr, &rt->rt6i_src.addr);
}
/* Compare addresses exactly. Well.. almost exactly; ignore scope_id
* for now. FIXME.
*/
static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
const union sctp_addr *addr2)
{
int match;
if (addr1->sa.sa_family != addr2->sa.sa_family)
return 0;
match = !ipv6_addr_cmp((struct in6_addr *)&addr1->v6.sin6_addr,
(struct in6_addr *)&addr2->v6.sin6_addr);
return ipv6_addr_cmp(&rt->rt6i_src.addr, &saddr->v6.sin6_addr);
return match;
}
/* Initialize addr struct to INADDR_ANY. */
static void sctp_v6_inaddr_any(union sctp_addr *addr, unsigned short port)
{
memset(addr, 0x00, sizeof(union sctp_addr));
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = port;
}
/* Is this a wildcard address? */
static int sctp_v6_is_any(const union sctp_addr *addr)
{
int type;
type = ipv6_addr_type((struct in6_addr *)&addr->v6.sin6_addr);
return IPV6_ADDR_ANY == type;
}
/* This function checks if the address is a valid address to be used for
* SCTP.
*
* Output:
* Return 0 - If the address is a non-unicast or an illegal address.
* Return 1 - If the address is a unicast.
*/
static int sctp_v6_addr_valid(union sctp_addr *addr)
{
int ret = sctp_ipv6_addr_type(&addr->v6.sin6_addr);
/* FIXME: v4-mapped-v6 address support. */
/* Is this a non-unicast address */
if (!(ret & IPV6_ADDR_UNICAST))
return 0;
return 1;
}
/* What is the scope of 'addr'? */
static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
{
int v6scope;
sctp_scope_t retval;
/* The IPv6 scope is really a set of bit fields.
* See IFA_* in <net/if_inet6.h>. Map to a generic SCTP scope.
*/
v6scope = ipv6_addr_scope(&addr->v6.sin6_addr);
switch (v6scope) {
case IFA_HOST:
retval = SCTP_SCOPE_LOOPBACK;
break;
case IFA_LINK:
retval = SCTP_SCOPE_LINK;
break;
case IFA_SITE:
retval = SCTP_SCOPE_PRIVATE;
break;
default:
retval = SCTP_SCOPE_GLOBAL;
break;
};
return retval;
}
/* Initialize a PF_INET6 socket msg_name. */
......@@ -227,12 +362,13 @@ static void sctp_inet6_msgname(char *msgname, int *addr_len)
}
/* Initialize a PF_INET msgname from a ulpevent. */
static void sctp_inet6_event_msgname(sctp_ulpevent_t *event, char *msgname, int *addrlen)
static void sctp_inet6_event_msgname(sctp_ulpevent_t *event, char *msgname,
int *addrlen)
{
struct sockaddr_in6 *sin6, *sin6from;
if (msgname) {
sockaddr_storage_t *addr;
union sctp_addr *addr;
sctp_inet6_msgname(msgname, addrlen);
sin6 = (struct sockaddr_in6 *)msgname;
......@@ -288,6 +424,49 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
}
}
/* Do we support this AF? */
static int sctp_inet6_af_supported(sa_family_t family)
{
/* FIXME: v4-mapped-v6 addresses. The I-D is still waffling
* on what to do with sockaddr formats for PF_INET6 sockets.
* For now assume we'll support both.
*/
switch (family) {
case AF_INET6:
case AF_INET:
return 1;
default:
return 0;
}
}
/* Address matching with wildcards allowed. This extra level
* of indirection lets us choose whether a PF_INET6 should
* disallow any v4 addresses if we so choose.
*/
static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
const union sctp_addr *addr2,
struct sctp_opt *opt)
{
struct sctp_func *af1, *af2;
af1 = sctp_get_af_specific(addr1->sa.sa_family);
af2 = sctp_get_af_specific(addr2->sa.sa_family);
if (!af1 || !af2)
return 0;
/* Today, wildcard AF_INET/AF_INET6. */
if (sctp_is_any(addr1) || sctp_is_any(addr2))
return 1;
if (addr1->sa.sa_family != addr2->sa.sa_family)
return 0;
return af1->cmp_addr(addr1, addr2);
}
static struct proto_ops inet6_seqpacket_ops = {
.family = PF_INET6,
.release = inet6_release,
......@@ -327,7 +506,14 @@ static sctp_func_t sctp_ipv6_specific = {
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.get_dst = sctp_v6_get_dst,
.cmp_saddr = sctp_v6_cmp_saddr,
.copy_addrlist = sctp_v6_copy_addrlist,
.from_skb = sctp_v6_from_skb,
.dst_saddr = sctp_v6_dst_saddr,
.cmp_addr = sctp_v6_cmp_addr,
.scope = sctp_v6_scope,
.addr_valid = sctp_v6_addr_valid,
.inaddr_any = sctp_v6_inaddr_any,
.is_any = sctp_v6_is_any,
.net_header_len = sizeof(struct ipv6hdr),
.sockaddr_len = sizeof(struct sockaddr_in6),
.sa_family = AF_INET6,
......@@ -336,6 +522,9 @@ static sctp_func_t sctp_ipv6_specific = {
static sctp_pf_t sctp_pf_inet6_specific = {
.event_msgname = sctp_inet6_event_msgname,
.skb_msgname = sctp_inet6_skb_msgname,
.af_supported = sctp_inet6_af_supported,
.cmp_addr = sctp_inet6_cmp_addr,
.af = &sctp_ipv6_specific,
};
/* Initialize IPv6 support and register with inet6 stack. */
......
......@@ -366,18 +366,13 @@ int sctp_packet_transmit(sctp_packet_t *packet)
*/
sh->checksum = htonl(crc32);
/* FIXME: Delete the rest of this switch statement once phase 2
* of address selection (ipv6 support) drops in.
*/
switch (transport->ipaddr.sa.sa_family) {
case AF_INET:
inet_sk(sk)->daddr = transport->ipaddr.v4.sin_addr.s_addr;
break;
case AF_INET6:
SCTP_V6(inet6_sk(sk)->daddr = transport->ipaddr.v6.sin6_addr;)
break;
default:
/* This is bogus address type, just bail. */
break;
};
/* IP layer ECN support
......@@ -430,10 +425,12 @@ int sctp_packet_transmit(sctp_packet_t *packet)
dst = transport->dst;
if (!dst || dst->obsolete) {
sctp_transport_route(transport, NULL);
sctp_transport_route(transport, NULL, sctp_sk(sk));
}
nskb->dst = dst_clone(transport->dst);
if (!nskb->dst)
goto no_route;
SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb length %d\n",
nskb->len);
......@@ -441,6 +438,11 @@ int sctp_packet_transmit(sctp_packet_t *packet)
out:
packet->size = SCTP_IP_OVERHEAD;
return err;
no_route:
kfree_skb(nskb);
IP_INC_STATS_BH(IpOutNoRoutes);
err = -EHOSTUNREACH;
goto out;
}
/********************************************************************
......
......@@ -678,7 +678,7 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
if (!new_transport) {
new_transport = asoc->peer.active_path;
} else if (!new_transport->state.active) {
} else if (!new_transport->active) {
/* If the chunk is Heartbeat, send it to
* chunk->transport, even it's inactive.
*/
......@@ -835,7 +835,7 @@ int sctp_flush_outqueue(sctp_outqueue_t *q, int rtx_timeout)
*/
new_transport = chunk->transport;
if (new_transport == NULL ||
!new_transport->state.active)
!new_transport->active)
new_transport = asoc->peer.active_path;
/* Change packets if necessary. */
......@@ -1404,7 +1404,7 @@ static void sctp_check_transmitted(sctp_outqueue_t *q,
/* Mark the destination transport address as
* active if it is not so marked.
*/
if (!transport->state.active) {
if (!transport->active) {
sctp_assoc_control_transport(
transport->asoc,
transport,
......
......@@ -38,6 +38,7 @@
* La Monte H.P. Yarroll <piggy@acm.org>
* Narasimha Budihal <narasimha@refcode.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -181,6 +182,28 @@ DECLARE_PRIMITIVE(ABORT);
DECLARE_PRIMITIVE(SEND);
/* 10.1 ULP-to-SCTP
* J) Request Heartbeat
*
* Format: REQUESTHEARTBEAT(association id, destination transport address)
*
* -> result
*
* Instructs the local endpoint to perform a HeartBeat on the specified
* destination transport address of the given association. The returned
* result should indicate whether the transmission of the HEARTBEAT
* chunk to the destination address is successful.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* o destination transport address - the transport address of the
* association on which a heartbeat should be issued.
*/
DECLARE_PRIMITIVE(REQUESTHEARTBEAT);
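A minimal sketch of how a caller might invoke this primitive, using the prototype declared in sctp_sm.h (asoc and transport are assumed to have been looked up already; names hypothetical):

/* Ask SCTP to send a HEARTBEAT on one peer transport address.
 * Sketch only: asoc and transport are assumed valid here.
 */
err = sctp_primitive_REQUESTHEARTBEAT(asoc, transport);
if (err)
	; /* the HEARTBEAT could not be generated or queued */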
/* COMMENT BUG. Find out where this is mentioned in the spec. */
int sctp_other_icmp_unreachfrag(sctp_association_t *asoc, void *arg)
{
......
......@@ -41,6 +41,7 @@
* Dajiang Zhang <dajiang.zhang@nokia.com>
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -68,11 +69,13 @@ static void sctp_do_8_2_transport_strike(sctp_association_t *asoc,
static void sctp_cmd_init_failed(sctp_cmd_seq_t *, sctp_association_t *asoc);
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *, sctp_association_t *asoc,
sctp_event_t event_type, sctp_chunk_t *chunk);
static void sctp_cmd_process_init(sctp_cmd_seq_t *, sctp_association_t *asoc,
sctp_chunk_t *chunk,
sctp_init_chunk_t *peer_init,
int priority);
static int sctp_cmd_process_init(sctp_cmd_seq_t *, sctp_association_t *asoc,
sctp_chunk_t *chunk,
sctp_init_chunk_t *peer_init,
int priority);
static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *, sctp_association_t *);
static void sctp_cmd_hb_timers_update(sctp_cmd_seq_t *, sctp_association_t *,
sctp_transport_t *);
static void sctp_cmd_set_bind_addrs(sctp_cmd_seq_t *, sctp_association_t *,
sctp_bind_addr_t *);
static void sctp_cmd_transport_reset(sctp_cmd_seq_t *, sctp_association_t *,
......@@ -83,6 +86,8 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *, sctp_association_t *,
sctp_sackhdr_t *);
static void sctp_cmd_setup_t2(sctp_cmd_seq_t *, sctp_association_t *,
sctp_chunk_t *);
static void sctp_cmd_new_state(sctp_cmd_seq_t *, sctp_association_t *,
sctp_state_t);
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
......@@ -193,6 +198,7 @@ int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
/* BUG--we should now recover some memory, probably by
* reneging...
*/
error = -ENOMEM;
break;
case SCTP_DISPOSITION_DELETE_TCB:
......@@ -301,8 +307,7 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_NEW_STATE:
/* Enter a new state. */
asoc->state = command->obj.state;
asoc->state_timestamp = jiffies;
sctp_cmd_new_state(commands, asoc, command->obj.state);
break;
case SCTP_CMD_REPORT_TSN:
......@@ -339,9 +344,14 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
break;
case SCTP_CMD_PEER_INIT:
/* Process a unified INIT from the peer. */
sctp_cmd_process_init(commands, asoc, chunk,
command->obj.ptr, priority);
/* Process a unified INIT from the peer.
* Note: Only used during INIT-ACK processing. If
* there is an error just return to the outer
* layer which will bail.
*/
error = sctp_cmd_process_init(commands, asoc, chunk,
command->obj.ptr,
priority);
break;
case SCTP_CMD_GEN_COOKIE_ECHO:
......@@ -561,6 +571,11 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
sctp_cmd_hb_timers_start(commands, asoc);
break;
case SCTP_CMD_HB_TIMERS_UPDATE:
t = command->obj.transport;
sctp_cmd_hb_timers_update(commands, asoc, t);
break;
case SCTP_CMD_REPORT_ERROR:
error = command->obj.error;
break;
......@@ -581,11 +596,18 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
chunk->pdiscard = 1;
break;
case SCTP_CMD_RTO_PENDING:
t = command->obj.transport;
t->rto_pending = 1;
break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
command->verb, command->obj.ptr);
break;
};
if (error)
return error;
}
return error;
......@@ -648,7 +670,7 @@ static sctp_chunk_t *sctp_do_ecn_ecne_work(sctp_association_t *asoc,
}
/* Always try to quiet the other end. In case of lost CWR,
* resend last_cwr_tsn.
* resend last_cwr_tsn.
*/
repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);
......@@ -978,7 +1000,7 @@ static void sctp_do_8_2_transport_strike(sctp_association_t *asoc,
*/
asoc->overall_error_count++;
if (transport->state.active &&
if (transport->active &&
(transport->error_count++ >= transport->error_threshold)) {
SCTP_DEBUG_PRINTK("transport_strike: transport "
"IP:%d.%d.%d.%d failed.\n",
......@@ -1058,22 +1080,32 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
}
/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
* inside the cookie.
* inside the cookie. In reality, this is only used for INIT-ACK processing
* since all other cases use "temporary" associations and can do all
* their work in statefuns directly.
*/
static void sctp_cmd_process_init(sctp_cmd_seq_t *commands,
sctp_association_t *asoc,
sctp_chunk_t *chunk,
sctp_init_chunk_t *peer_init,
int priority)
static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
sctp_association_t *asoc,
sctp_chunk_t *chunk,
sctp_init_chunk_t *peer_init,
int priority)
{
/* The command sequence holds commands assuming that the
* processing will happen successfully. If this is not the
* case, rewind the sequence and add appropriate error handling
* to the sequence.
int error;
/* We only process the init as a sideeffect in a single
* case. This is when we process the INIT-ACK. If we
* fail during INIT processing (due to malloc problems),
* just return the error and stop processing the stack.
*/
sctp_process_init(asoc, chunk->chunk_hdr->type,
sctp_source(chunk), peer_init,
priority);
if (!sctp_process_init(asoc, chunk->chunk_hdr->type,
sctp_source(chunk), peer_init,
priority))
error = -ENOMEM;
else
error = 0;
return error;
}
/* Helper function to break out starting up of heartbeat timers. */
......@@ -1096,6 +1128,16 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
}
}
/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timers_update(sctp_cmd_seq_t *cmds,
sctp_association_t *asoc,
sctp_transport_t *t)
{
/* Update the heartbeat timer. */
if (!mod_timer(&t->hb_timer, t->hb_interval + t->rto + jiffies))
sctp_transport_hold(t);
}
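A note on the helper above: mod_timer() returns 0 when the timer was idle and is being armed, and 1 when it only moved an already-pending timer, so the extra transport reference is taken only for a newly armed heartbeat timer. The same pattern, restated with comments (a sketch reusing the calls already shown above):
/* mod_timer() == 0 here means the heartbeat timer was not pending and
 * is being armed now; hold the transport so it cannot be freed while
 * the timer is pending (the matching put is expected in the expiry path).
 */
if (!mod_timer(&t->hb_timer, t->hb_interval + t->rto + jiffies))
	sctp_transport_hold(t);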
/* Helper function to break out SCTP_CMD_SET_BIND_ADDR handling. */
void sctp_cmd_set_bind_addrs(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
sctp_bind_addr_t *bp)
......@@ -1131,7 +1173,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
/* Mark the destination transport address as active if it is not so
* marked.
*/
if (!t->state.active)
if (!t->active)
sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
SCTP_HEARTBEAT_SUCCESS);
......@@ -1154,10 +1196,6 @@ static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
/* Mark one strike against a transport. */
sctp_do_8_2_transport_strike(asoc, t);
/* Update the heartbeat timer. */
if (!mod_timer(&t->hb_timer, t->hb_interval + t->rto + jiffies))
sctp_transport_hold(t);
}
/* Helper function to process the SACK command. */
......@@ -1196,3 +1234,18 @@ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
chunk->transport = t;
}
/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
sctp_state_t state)
{
asoc->state = state;
asoc->state_timestamp = jiffies;
/* Wake up any process waiting for the association to
* get established.
*/
if ((SCTP_STATE_ESTABLISHED == asoc->state) &&
(waitqueue_active(&asoc->wait)))
wake_up_interruptible(&asoc->wait);
}
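The wakeup in sctp_cmd_new_state() pairs with a sleeper blocked on asoc->wait. A hypothetical sketch of that waiting side, not part of this patch: wait_event_interruptible() is the stock kernel primitive, while the helper name and its use here are illustrative only.
/* Hypothetical waiter: block until the association reaches ESTABLISHED
 * (or a signal arrives), relying on the wake_up_interruptible() above.
 */
static int demo_wait_for_establish(sctp_association_t *asoc)
{
	return wait_event_interruptible(asoc->wait,
			asoc->state == SCTP_STATE_ESTABLISHED);
}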
......@@ -236,20 +236,18 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
chunk->subh.init_hdr = (sctp_inithdr_t *)chunk->skb->data;
/* Tag the variable length parameters. */
chunk->param_hdr.v =
skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
if (!new_asoc)
goto nomem;
/* FIXME: sctp_process_init can fail, but there is no
* status nor handling.
*/
sctp_process_init(new_asoc, chunk->chunk_hdr->type,
sctp_source(chunk),
(sctp_init_chunk_t *)chunk->chunk_hdr,
GFP_ATOMIC);
/* The call, sctp_process_init(), can fail on memory allocation. */
if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
sctp_source(chunk),
(sctp_init_chunk_t *)chunk->chunk_hdr,
GFP_ATOMIC))
goto nomem_init;
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
......@@ -302,10 +300,10 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
return SCTP_DISPOSITION_DELETE_TCB;
nomem_ack:
sctp_association_free(new_asoc);
if (err_chunk)
sctp_free_chunk(err_chunk);
nomem_init:
sctp_association_free(new_asoc);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
......@@ -563,9 +561,11 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
* effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
sctp_process_init(new_asoc, chunk->chunk_hdr->type,
&chunk->subh.cookie_hdr->c.peer_addr, peer_init,
GFP_ATOMIC);
if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
&chunk->subh.cookie_hdr->c.peer_addr,
peer_init, GFP_ATOMIC))
goto nomem_init;
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
......@@ -592,10 +592,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const sctp_endpoint_t *ep,
nomem_ev:
sctp_free_chunk(repl);
nomem_repl:
nomem_init:
sctp_association_free(new_asoc);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
......@@ -664,6 +663,39 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const sctp_endpoint_t *ep,
return SCTP_DISPOSITION_NOMEM;
}
/* Generate and send out a heartbeat packet. */
sctp_disposition_t sctp_sf_heartbeat(const sctp_endpoint_t *ep,
const sctp_association_t *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_transport_t *transport = (sctp_transport_t *) arg;
sctp_chunk_t *reply;
sctp_sender_hb_info_t hbinfo;
size_t paylen = 0;
hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
hbinfo.daddr = transport->ipaddr;
hbinfo.sent_at = jiffies;
/* Send a heartbeat to our peer. */
paylen = sizeof(sctp_sender_hb_info_t);
reply = sctp_make_heartbeat(asoc, transport, &hbinfo, paylen);
if (!reply)
return SCTP_DISPOSITION_NOMEM;
/* Set rto_pending indicating that an RTT measurement
* is started with this heartbeat chunk.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_RTO_PENDING,
SCTP_TRANSPORT(transport));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
}
/* Generate a HEARTBEAT packet on the given transport. */
sctp_disposition_t sctp_sf_sendbeat_8_3(const sctp_endpoint_t *ep,
const sctp_association_t *asoc,
......@@ -672,9 +704,6 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands)
{
sctp_transport_t *transport = (sctp_transport_t *) arg;
sctp_chunk_t *reply;
sctp_sender_hb_info_t hbinfo;
size_t paylen = 0;
if (asoc->overall_error_count >= asoc->overall_error_threshold) {
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
......@@ -689,34 +718,21 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const sctp_endpoint_t *ep,
* HEARTBEAT is sent (see Section 8.3).
*/
hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
hbinfo.daddr = transport->ipaddr;
hbinfo.sent_at = jiffies;
/* Set rto_pending indicating that an RTT measurement is started
* with this heartbeat chunk.
*/
transport->rto_pending = 1;
/* Send a heartbeat to our peer. */
paylen = sizeof(sctp_sender_hb_info_t);
reply = sctp_make_heartbeat(asoc, transport, &hbinfo, paylen);
if (!reply)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
/* Set transport error counter and association error counter
* when sending heartbeat.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_RESET,
if (transport->hb_allowed) {
if (SCTP_DISPOSITION_NOMEM ==
sctp_sf_heartbeat(ep, asoc, type, arg,
commands))
return SCTP_DISPOSITION_NOMEM;
/* Set transport error counter and association error counter
* when sending heartbeat.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_RESET,
SCTP_TRANSPORT(transport));
}
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_UPDATE,
SCTP_TRANSPORT(transport));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
......@@ -817,7 +833,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const sctp_endpoint_t *ep,
sctp_cmd_seq_t *commands)
{
sctp_chunk_t *chunk = arg;
sockaddr_storage_t from_addr;
union sctp_addr from_addr;
sctp_transport_t *link;
sctp_sender_hb_info_t *hbinfo;
unsigned long max_interval;
......@@ -866,7 +882,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const sctp_endpoint_t *ep,
/* Helper function to send out an abort for the restart
* condition.
*/
static int sctp_sf_send_restart_abort(sockaddr_storage_t *ssa,
static int sctp_sf_send_restart_abort(union sctp_addr *ssa,
sctp_chunk_t *init,
sctp_cmd_seq_t *commands)
{
......@@ -1125,8 +1141,13 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
* Verification Tag and Peer's Verification Tag into a reserved
* place (local tie-tag and peer tie-tag) within the state cookie.
*/
sctp_process_init(new_asoc, chunk->chunk_hdr->type, sctp_source(chunk),
(sctp_init_chunk_t *)chunk->chunk_hdr, GFP_ATOMIC);
if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
sctp_source(chunk),
(sctp_init_chunk_t *)chunk->chunk_hdr,
GFP_ATOMIC)) {
retval = SCTP_DISPOSITION_NOMEM;
goto nomem_init;
}
/* Make sure no new addresses are being added during the
* restart. Do not do this check for COOKIE-WAIT state,
......@@ -1197,6 +1218,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
nomem:
retval = SCTP_DISPOSITION_NOMEM;
goto cleanup;
nomem_init:
cleanup_asoc:
sctp_association_free(new_asoc);
goto cleanup;
......@@ -1326,15 +1348,16 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const sctp_endpoint_t *ep,
* side effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
sctp_process_init(new_asoc, chunk->chunk_hdr->type,
sctp_source(chunk), peer_init, GFP_ATOMIC);
if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
sctp_source(chunk), peer_init, GFP_ATOMIC))
goto nomem;
/* Make sure no new addresses are being added during the
* restart. Though this is a pretty complicated attack
* since you'd have to get inside the cookie.
*/
if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
printk("cookie echo check\n");
return SCTP_DISPOSITION_CONSUME;
}
......@@ -1391,8 +1414,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const sctp_endpoint_t *ep,
* side effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
sctp_process_init(new_asoc, chunk->chunk_hdr->type,
sctp_source(chunk), peer_init, GFP_ATOMIC);
if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
sctp_source(chunk), peer_init, GFP_ATOMIC))
goto nomem;
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
......@@ -3656,6 +3680,39 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
return sctp_sf_shutdown_sent_prm_abort(ep, asoc, type, arg, commands);
}
/*
* Process the REQUESTHEARTBEAT primitive
*
* 10.1 ULP-to-SCTP
* J) Request Heartbeat
*
* Format: REQUESTHEARTBEAT(association id, destination transport address)
*
* -> result
*
* Instructs the local endpoint to perform a HeartBeat on the specified
* destination transport address of the given association. The returned
* result should indicate whether the transmission of the HEARTBEAT
* chunk to the destination address is successful.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* o destination transport address - the transport address of the
* association on which a heartbeat should be issued.
*/
sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
const sctp_endpoint_t *ep,
const sctp_association_t *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
return sctp_sf_heartbeat(ep, asoc, type, (sctp_transport_t *)arg,
commands);
}
/*
* Ignore the primitive event
*
......@@ -4257,7 +4314,8 @@ sctp_packet_t *sctp_ootb_pkt_new(const sctp_association_t *asoc,
/* Cache a route for the transport with the chunk's destination as
* the source address.
*/
sctp_transport_route(transport, (sockaddr_storage_t *)&chunk->dest);
sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
sctp_sk(sctp_get_ctl_sock()));
packet = sctp_packet_init(packet, transport, sport, dport);
packet = sctp_packet_config(packet, vtag, 0, NULL);
......
......@@ -39,6 +39,7 @@
* Jon Grimm <jgrimm@us.ibm.com>
* Hui Huang <hui.huang@nokia.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
......@@ -706,21 +707,28 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
/* SCTP_STATE_EMPTY */ \
{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
/* SCTP_STATE_CLOSED */ \
{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
{.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \
/* SCTP_STATE_COOKIE_WAIT */ \
{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
{.fn = sctp_sf_do_prm_requestheartbeat, \
.name = "sctp_sf_do_prm_requestheartbeat"}, \
/* SCTP_STATE_COOKIE_ECHOED */ \
{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
{.fn = sctp_sf_do_prm_requestheartbeat, \
.name = "sctp_sf_do_prm_requestheartbeat"}, \
/* SCTP_STATE_ESTABLISHED */ \
{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
{.fn = sctp_sf_do_prm_requestheartbeat, \
.name = "sctp_sf_do_prm_requestheartbeat"}, \
/* SCTP_STATE_SHUTDOWN_PENDING */ \
{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
{.fn = sctp_sf_do_prm_requestheartbeat, \
.name = "sctp_sf_do_prm_requestheartbeat"}, \
/* SCTP_STATE_SHUTDOWN_SENT */ \
{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
{.fn = sctp_sf_do_prm_requestheartbeat, \
.name = "sctp_sf_do_prm_requestheartbeat"}, \
/* SCTP_STATE_SHUTDOWN_RECEIVED */ \
{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
{.fn = sctp_sf_do_prm_requestheartbeat, \
.name = "sctp_sf_do_prm_requestheartbeat"}, \
/* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
{.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \
{.fn = sctp_sf_do_prm_requestheartbeat, \
.name = "sctp_sf_do_prm_requestheartbeat"}, \
} /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */
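With these entries in place, a REQUESTHEARTBEAT primitive delivered in any live association state dispatches to sctp_sf_do_prm_requestheartbeat() instead of sctp_sf_not_impl(). A sketch of the entry shape implied by the .fn/.name designated initializers above; the typedef and field types are assumptions, not copied from this patch:
/* Sketch: each {event, state} slot in the table pairs a state function
 * with a printable name used for debugging.
 */
typedef struct {
	sctp_state_fn_t *fn;	/* handler run for this event in this state */
	char *name;		/* human-readable name of the handler */
} demo_sm_table_entry_t;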
#define TYPE_SCTP_PRIMITIVE_GETSRTTREPORT { \
......
......@@ -9,7 +9,7 @@
*
* This module provides the abstraction for an SCTP transport representing
* a remote transport address. For local transport addresses, we just use
* sockaddr_storage_t.
* union sctp_addr.
*
* The SCTP reference implementation is free software;
* you can redistribute it and/or modify it under the terms of
......@@ -53,7 +53,7 @@
/* 1st Level Abstractions. */
/* Allocate and initialize a new transport. */
sctp_transport_t *sctp_transport_new(const sockaddr_storage_t *addr, int priority)
sctp_transport_t *sctp_transport_new(const union sctp_addr *addr, int priority)
{
sctp_transport_t *transport;
......@@ -78,14 +78,14 @@ sctp_transport_t *sctp_transport_new(const sockaddr_storage_t *addr, int priorit
/* Initialize a new transport from provided memory. */
sctp_transport_t *sctp_transport_init(sctp_transport_t *peer,
const sockaddr_storage_t *addr,
const union sctp_addr *addr,
int priority)
{
sctp_protocol_t *proto = sctp_get_protocol();
/* Copy in the address. */
peer->ipaddr = *addr;
peer->af_specific = sctp_get_af_specific(addr);
peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
peer->asoc = NULL;
/* From 6.3.1 RTO Calculation:
......@@ -104,8 +104,8 @@ sctp_transport_t *sctp_transport_init(sctp_transport_t *peer,
peer->last_time_used = jiffies;
peer->last_time_ecne_reduced = jiffies;
peer->state.active = 1;
peer->state.hb_allowed = 0;
peer->active = 1;
peer->hb_allowed = 0;
/* Initialize the default path max_retrans. */
peer->max_retrans = proto->max_retrans_path;
......@@ -203,17 +203,18 @@ void sctp_transport_set_owner(sctp_transport_t *transport,
/* Caches the dst entry for a transport's destination address and an optional
* source address.
*/
void sctp_transport_route(sctp_transport_t *transport,
sockaddr_storage_t *saddr)
void sctp_transport_route(sctp_transport_t *transport, union sctp_addr *saddr,
struct sctp_opt *opt)
{
sctp_association_t *asoc = transport->asoc;
sctp_func_t *af = transport->af_specific;
sockaddr_storage_t *daddr = &transport->ipaddr;
struct sctp_func *af = transport->af_specific;
union sctp_addr *daddr = &transport->ipaddr;
sctp_bind_addr_t *bp;
rwlock_t *addr_lock;
struct sockaddr_storage_list *laddr;
struct list_head *pos;
struct dst_entry *dst;
union sctp_addr dst_saddr;
dst = af->get_dst(daddr, saddr);
......@@ -239,7 +240,8 @@ void sctp_transport_route(sctp_transport_t *transport,
list_for_each(pos, &bp->address_list) {
laddr = list_entry(pos, struct sockaddr_storage_list,
list);
if (af->cmp_saddr(dst, &laddr->a))
af->dst_saddr(&dst_saddr, dst);
if (opt->pf->cmp_addr(&dst_saddr, &laddr->a, opt))
goto out_unlock;
}
sctp_read_unlock(addr_lock);
......