Commit 94fe29e3 authored by Vojtech Pavlik

Merge suse.cz:/data/bk/linus into suse.cz:/data/bk/input

parents d7b104d9 1c400de9
......@@ -116,11 +116,14 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
}
asmlinkage int
sys_sigaltstack(struct pt_regs regs)
sys_sigaltstack(unsigned long ebx)
{
const stack_t __user *uss = (const stack_t __user *)regs.ebx;
stack_t __user *uoss = (stack_t __user *)regs.ecx;
return do_sigaltstack(uss, uoss, regs.esp);
/* This is needed to make gcc realize it doesn't own the "struct pt_regs" */
struct pt_regs *regs = (struct pt_regs *)&ebx;
const stack_t __user *uss = (const stack_t __user *)ebx;
stack_t __user *uoss = (stack_t __user *)regs->ecx;
return do_sigaltstack(uss, uoss, regs->esp);
}
......
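The rewritten sys_sigaltstack() uses a standard i386 syscall idiom: declare only the first register slot (ebx) as the argument and rebuild the full register frame from its address, so gcc never believes it owns a by-value struct pt_regs that it may freely spill back. A minimal sketch of the cast, with a hypothetical two-field frame standing in for pt_regs:

/* Sketch only: two-field stand-in for the start of the i386 pt_regs layout. */
struct fake_regs { unsigned long ebx, ecx; };

/* The callee receives the first stacked word by value; the rest of the
 * frame sits directly above it, so &ebx recovers the whole frame without
 * gcc treating the struct as a writable local copy. */
unsigned long peek_ecx(unsigned long ebx)
{
	struct fake_regs *regs = (struct fake_regs *)&ebx;
	return regs->ecx;	/* meaningful only if the caller laid out the frame */
}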
......@@ -39,44 +39,16 @@
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
struct deflate_ctx {
int comp_initialized;
int decomp_initialized;
struct z_stream_s comp_stream;
struct z_stream_s decomp_stream;
};
static inline int deflate_gfp(void)
{
return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
static int deflate_init(void *ctx)
{
return 0;
}
static void deflate_exit(void *ctx)
{
struct deflate_ctx *dctx = ctx;
if (dctx->comp_initialized)
vfree(dctx->comp_stream.workspace);
if (dctx->decomp_initialized)
kfree(dctx->decomp_stream.workspace);
}
/*
* Lazy initialization to make interface simple without allocating
* un-needed workspaces. Thus can be called in softirq context.
*/
static int deflate_comp_init(struct deflate_ctx *ctx)
{
int ret = 0;
struct z_stream_s *stream = &ctx->comp_stream;
stream->workspace = __vmalloc(zlib_deflate_workspacesize(),
deflate_gfp()|__GFP_HIGHMEM,
PAGE_KERNEL);
stream->workspace = vmalloc(zlib_deflate_workspacesize());
if (!stream->workspace ) {
ret = -ENOMEM;
goto out;
......@@ -89,7 +61,6 @@ static int deflate_comp_init(struct deflate_ctx *ctx)
ret = -EINVAL;
goto out_free;
}
ctx->comp_initialized = 1;
out:
return ret;
out_free:
......@@ -102,8 +73,7 @@ static int deflate_decomp_init(struct deflate_ctx *ctx)
int ret = 0;
struct z_stream_s *stream = &ctx->decomp_stream;
stream->workspace = kmalloc(zlib_inflate_workspacesize(),
deflate_gfp());
stream->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
if (!stream->workspace ) {
ret = -ENOMEM;
goto out;
......@@ -114,7 +84,6 @@ static int deflate_decomp_init(struct deflate_ctx *ctx)
ret = -EINVAL;
goto out_free;
}
ctx->decomp_initialized = 1;
out:
return ret;
out_free:
......@@ -122,6 +91,36 @@ static int deflate_decomp_init(struct deflate_ctx *ctx)
goto out;
}
static void deflate_comp_exit(struct deflate_ctx *ctx)
{
vfree(ctx->comp_stream.workspace);
}
static void deflate_decomp_exit(struct deflate_ctx *ctx)
{
kfree(ctx->decomp_stream.workspace);
}
static int deflate_init(void *ctx)
{
int ret;
ret = deflate_comp_init(ctx);
if (ret)
goto out;
ret = deflate_decomp_init(ctx);
if (ret)
deflate_comp_exit(ctx);
out:
return ret;
}
static void deflate_exit(void *ctx)
{
deflate_comp_exit(ctx);
deflate_decomp_exit(ctx);
}
static int deflate_compress(void *ctx, const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
......@@ -129,12 +128,6 @@ static int deflate_compress(void *ctx, const u8 *src, unsigned int slen,
struct deflate_ctx *dctx = ctx;
struct z_stream_s *stream = &dctx->comp_stream;
if (!dctx->comp_initialized) {
ret = deflate_comp_init(dctx);
if (ret)
goto out;
}
ret = zlib_deflateReset(stream);
if (ret != Z_OK) {
ret = -EINVAL;
......@@ -165,12 +158,6 @@ static int deflate_decompress(void *ctx, const u8 *src, unsigned int slen,
struct deflate_ctx *dctx = ctx;
struct z_stream_s *stream = &dctx->decomp_stream;
if (!dctx->decomp_initialized) {
ret = deflate_decomp_init(dctx);
if (ret)
goto out;
}
ret = zlib_inflateReset(stream);
if (ret != Z_OK) {
ret = -EINVAL;
......
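Two things fall out of the deflate refactor above: initialization moves from first use to instance creation, so the comp_initialized/decomp_initialized flags and the softirq-aware deflate_gfp() helper become dead weight, and deflate_init() becomes the usual two-resource acquire/unwind sequence. A generic sketch of that unwind idiom, with hypothetical helpers mirroring the comp/decomp pair:

struct obj;
int acquire_a(struct obj *o);	/* e.g. the vmalloc'd deflate workspace */
int acquire_b(struct obj *o);	/* e.g. the kmalloc'd inflate workspace */
void release_a(struct obj *o);

int setup_both(struct obj *o)
{
	int err;

	err = acquire_a(o);
	if (err)
		return err;

	err = acquire_b(o);
	if (err)
		release_a(o);	/* never leak A when B fails */

	return err;
}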
......@@ -663,7 +663,10 @@ static int twofish_setkey(void *cx, const u8 *key,
/* Check key length. */
if (key_len != 16 && key_len != 24 && key_len != 32)
{
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL; /* unsupported key length */
}
/* Compute the first two words of the S vector. The magic numbers are
* the entries of the RS matrix, preprocessed through poly_to_exp. The
......
......@@ -586,7 +586,6 @@ static int bfusb_load_firmware(struct bfusb *bfusb, unsigned char *firmware, int
}
bfusb->udev->toggle[0] = bfusb->udev->toggle[1] = 0;
bfusb->udev->halted[0] = bfusb->udev->halted[1] = 0;
buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_ATOMIC);
if (!buf) {
......@@ -628,7 +627,6 @@ static int bfusb_load_firmware(struct bfusb *bfusb, unsigned char *firmware, int
}
bfusb->udev->toggle[0] = bfusb->udev->toggle[1] = 0;
bfusb->udev->halted[0] = bfusb->udev->halted[1] = 0;
BT_INFO("BlueFRITZ! USB device ready");
......
/*
*/
#ifndef __H8_H__
#define __H8_H__
/*
* Register address and offsets
*/
#define H8_BASE_ADDR 0x170 /* default */
#define H8_IRQ 9 /* default */
#define H8_STATUS_REG_OFF 0x4
#define H8_CMD_REG_OFF 0x4
#define H8_DATA_REG_OFF 0x0
/* H8 register bit definitions */
/* status register */
#define H8_OFULL 0x1 /* output data register full */
#define H8_IFULL 0x2 /* input data register full */
#define H8_CMD 0x8 /* command / not data */
#define H8_INTR 0xfa
#define H8_NACK 0xfc
#define H8_BYTE_LEVEL_ACK 0xfd
#define H8_CMD_ACK 0xfe
#define H8_SYNC_BYTE 0x99
/*
* H8 command definitions
*/
/* System info commands */
#define H8_SYNC 0x0
#define H8_RD_SN 0x1
#define H8_RD_ENET_ADDR 0x2
#define H8_RD_HW_VER 0x3
#define H8_RD_MIC_VER 0x4
#define H8_RD_MAX_TEMP 0x5
#define H8_RD_MIN_TEMP 0x6
#define H8_RD_CURR_TEMP 0x7
#define H8_RD_SYS_VARIENT 0x8
#define H8_RD_PWR_ON_CYCLES 0x9
#define H8_RD_PWR_ON_SECS 0xa
#define H8_RD_RESET_STATUS 0xb
#define H8_RD_PWR_DN_STATUS 0xc
#define H8_RD_EVENT_STATUS 0xd
#define H8_RD_ROM_CKSM 0xe
#define H8_RD_EXT_STATUS 0xf
#define H8_RD_USER_CFG 0x10
#define H8_RD_INT_BATT_VOLT 0x11
#define H8_RD_DC_INPUT_VOLT 0x12
#define H8_RD_HORIZ_PTR_VOLT 0x13
#define H8_RD_VERT_PTR_VOLT 0x14
#define H8_RD_EEPROM_STATUS 0x15
#define H8_RD_ERR_STATUS 0x16
#define H8_RD_NEW_BUSY_SPEED 0x17
#define H8_RD_CONFIG_INTERFACE 0x18
#define H8_RD_INT_BATT_STATUS 0x19
#define H8_RD_EXT_BATT_STATUS 0x1a
#define H8_RD_PWR_UP_STATUS 0x1b
#define H8_RD_EVENT_STATUS_MASK 0x56
/* Read/write/modify commands */
#define H8_CTL_EMU_BITPORT 0x32
#define H8_DEVICE_CONTROL 0x21
#define H8_CTL_TFT_BRT_DC 0x22
#define H8_CTL_WATCHDOG 0x23
#define H8_CTL_MIC_PROT 0x24
#define H8_CTL_INT_BATT_CHG 0x25
#define H8_CTL_EXT_BATT_CHG 0x26
#define H8_CTL_MARK_SPACE 0x27
#define H8_CTL_MOUSE_SENSITIVITY 0x28
#define H8_CTL_DIAG_MODE 0x29
#define H8_CTL_IDLE_AND_BUSY_SPDS 0x2a
#define H8_CTL_TFT_BRT_BATT 0x2b
#define H8_CTL_UPPER_TEMP 0x2c
#define H8_CTL_LOWER_TEMP 0x2d
#define H8_CTL_TEMP_CUTOUT 0x2e
#define H8_CTL_WAKEUP 0x2f
#define H8_CTL_CHG_THRESHOLD 0x30
#define H8_CTL_TURBO_MODE 0x31
#define H8_SET_DIAG_STATUS 0x40
#define H8_SOFTWARE_RESET 0x41
#define H8_RECAL_PTR 0x42
#define H8_SET_INT_BATT_PERCENT 0x43
#define H8_WRT_CFG_INTERFACE_REG 0x45
#define H8_WRT_EVENT_STATUS_MASK 0x57
#define H8_ENTER_POST_MODE 0x46
#define H8_EXIT_POST_MODE 0x47
/* Block transfer commands */
#define H8_RD_EEPROM 0x50
#define H8_WRT_EEPROM 0x51
#define H8_WRT_TO_STATUS_DISP 0x52
#define H8_DEFINE_SPC_CHAR 0x53
/* Generic commands */
#define H8_DEFINE_TABLE_STRING_ENTRY 0x60
/* Battery control commands */
#define H8_PERFORM_EMU_CMD 0x70
#define H8_EMU_RD_REG 0x71
#define H8_EMU_WRT_REG 0x72
#define H8_EMU_RD_RAM 0x73
#define H8_EMU_WRT_RAM 0x74
#define H8_BQ_RD_REG 0x75
#define H8_BQ_WRT_REG 0x76
/* System admin commands */
#define H8_PWR_OFF 0x80
/*
* H8 command related definitions
*/
/* device control argument bits */
#define H8_ENAB_EXTSMI 0x1
#define H8_DISAB_IRQ 0x2
#define H8_ENAB_FLASH_WRT 0x4
#define H8_ENAB_THERM 0x8
#define H8_ENAB_INT_PTR 0x10
#define H8_ENAB_LOW_SPD_IND 0x20
#define H8_ENAB_EXT_PTR 0x40
#define H8_DISAB_PWR_OFF_SW 0x80
#define H8_POWER_OFF 0x80
/* H8 read event status bits */
#define H8_DC_CHANGE 0x1
#define H8_INT_BATT_LOW 0x2
#define H8_INT_BATT_CHARGE_THRESHOLD 0x4
#define H8_INT_BATT_CHARGE_STATE 0x8
#define H8_INT_BATT_STATUS 0x10
#define H8_EXT_BATT_CHARGE_STATE 0x20
#define H8_EXT_BATT_LOW 0x40
#define H8_EXT_BATT_STATUS 0x80
#define H8_THERMAL_THRESHOLD 0x100
#define H8_WATCHDOG 0x200
#define H8_DOCKING_STATION_STATUS 0x400
#define H8_EXT_MOUSE_OR_CASE_SWITCH 0x800
#define H8_KEYBOARD 0x1000
#define H8_BATT_CHANGE_OVER 0x2000
#define H8_POWER_BUTTON 0x4000
#define H8_SHUTDOWN 0x8000
/* H8 control idle and busy speeds */
#define H8_SPEED_LOW 0x1
#define H8_SPEED_MED 0x2
#define H8_SPEED_HI 0x3
#define H8_SPEED_LOCKED 0x80
#define H8_MAX_CMD_SIZE 18
#define H8_Q_ALLOC_AMOUNT 10
/* H8 state field values */
#define H8_IDLE 1
#define H8_XMIT 2
#define H8_RCV 3
#define H8_RESYNC 4
#define H8_INTR_MODE 5
/* Mask values for control functions */
#define UTH_HYSTERESIS 5
#define DEFAULT_UTHERMAL_THRESHOLD 115
#define H8_TIMEOUT_INTERVAL 30
#define H8_RUN 4
#define H8_GET_MAX_TEMP 0x1
#define H8_GET_CURR_TEMP 0x2
#define H8_GET_UPPR_THRMAL_THOLD 0x4
#define H8_GET_ETHERNET_ADDR 0x8
#define H8_SYNC_OP 0x10
#define H8_SET_UPPR_THRMAL_THOLD 0x20
#define H8_GET_INT_BATT_STAT 0x40
#define H8_GET_CPU_SPD 0x80
#define H8_MANAGE_UTHERM 0x100
#define H8_MANAGE_LTHERM 0x200
#define H8_HALT 0x400
#define H8_CRASH 0x800
#define H8_GET_EXT_STATUS 0x10000
#define H8_MANAGE_QUIET 0x20000
#define H8_MANAGE_SPEEDUP 0x40000
#define H8_MANAGE_BATTERY 0x80000
#define H8_SYSTEM_DELAY_TEST 0x100000
#define H8_POWER_SWITCH_TEST 0x200000
/* CPU speeds and clock divisor values */
#define MHZ_14 5
#define MHZ_28 4
#define MHZ_57 3
#define MHZ_115 2
#define MHZ_230 0
/*
* H8 data
*/
struct h8_data {
u_int ser_num;
u_char ether_add[6];
u_short hw_ver;
u_short mic_ver;
u_short max_tmp;
u_short min_tmp;
u_short cur_tmp;
u_int sys_var;
u_int pow_on;
u_int pow_on_secs;
u_char reset_status;
u_char pwr_dn_status;
u_short event_status;
u_short rom_cksm;
u_short ext_status;
u_short u_cfg;
u_char ibatt_volt;
u_char dc_volt;
u_char ptr_horiz;
u_char ptr_vert;
u_char eeprom_status;
u_char error_status;
u_char new_busy_speed;
u_char cfg_interface;
u_short int_batt_status;
u_short ext_batt_status;
u_char pow_up_status;
u_char event_status_mask;
};
/*
* H8 command buffers
*/
typedef struct h8_cmd_q {
struct list_head link; /* double linked list */
int ncmd; /* number of bytes in command */
int nrsp; /* number of bytes in response */
int cnt; /* number of bytes sent/received */
int nacks; /* number of byte level acks */
u_char cmdbuf[H8_MAX_CMD_SIZE]; /* buffer to store command */
u_char rcvbuf[H8_MAX_CMD_SIZE]; /* buffer to store response */
} h8_cmd_q_t;
union intr_buf {
u_char byte[2];
u_int word;
};
#endif /* __H8_H__ */
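For illustration, the event-status bits defined above decode with plain mask tests. A minimal sketch (the include path and the status value are hypothetical; a real driver would read the word from the H8 data register):

#include <stdio.h>
#include "h8.h"		/* hypothetical path to the header above */

static void h8_print_events(unsigned short status)
{
	if (status & H8_POWER_BUTTON)		/* 0x4000 */
		printf("power button pressed\n");
	if (status & H8_INT_BATT_LOW)		/* 0x2 */
		printf("internal battery low\n");
	if (status & H8_THERMAL_THRESHOLD)	/* 0x100 */
		printf("thermal threshold crossed\n");
}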
......@@ -100,34 +100,6 @@ struct tc_prio_qopt
__u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
};
/* CSZ section */
struct tc_csz_qopt
{
int flows; /* Maximal number of guaranteed flows */
unsigned char R_log; /* Fixed point position for round number */
unsigned char delta_log; /* Log of maximal managed time interval */
__u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> CSZ band */
};
struct tc_csz_copt
{
struct tc_ratespec slice;
struct tc_ratespec rate;
struct tc_ratespec peakrate;
__u32 limit;
__u32 buffer;
__u32 mtu;
};
enum
{
TCA_CSZ_UNSPEC,
TCA_CSZ_PARMS,
TCA_CSZ_RTAB,
TCA_CSZ_PTAB,
};
/* TBF section */
struct tc_tbf_qopt
......@@ -437,6 +409,6 @@ struct tc_netem_qopt
__u32 loss; /* random packet loss (0=none ~0=100%) */
__u32 gap; /* re-ordering gap (0 for delay all) */
__u32 duplicate; /* random packet dup (0=none ~0=100%) */
__u32 rate; /* maximum transmit rate (bytes/sec) */
__u32 jitter; /* random jitter in latency (us) */
};
#endif
......@@ -135,6 +135,11 @@ enum {
XFRM_MSG_POLEXPIRE,
#define XFRM_MSG_POLEXPIRE XFRM_MSG_POLEXPIRE
XFRM_MSG_FLUSHSA,
#define XFRM_MSG_FLUSHSA XFRM_MSG_FLUSHSA
XFRM_MSG_FLUSHPOLICY,
#define XFRM_MSG_FLUSHPOLICY XFRM_MSG_FLUSHPOLICY
XFRM_MSG_MAX
};
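The self-shadowing defines such as "#define XFRM_MSG_FLUSHSA XFRM_MSG_FLUSHSA" are deliberate: enumerators are invisible to the preprocessor, so shadowing each one with a same-named macro lets userspace feature-test new message types at compile time. A sketch of the consumer-side check:

#include <linux/xfrm.h>

#ifdef XFRM_MSG_FLUSHPOLICY
/* Headers are new enough: the flush-policy message exists. */
static const int have_flush_policy = 1;
#else
static const int have_flush_policy = 0;
#endif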
......@@ -242,6 +247,10 @@ struct xfrm_user_polexpire {
__u8 hard;
};
struct xfrm_usersa_flush {
__u8 proto;
};
#define XFRMGRP_ACQUIRE 1
#define XFRMGRP_EXPIRE 2
......
......@@ -176,6 +176,14 @@ struct l2cap_info_rsp {
__u8 data[0];
} __attribute__ ((packed));
/* info type */
#define L2CAP_IT_CL_MTU 0x0001
#define L2CAP_IT_FEAT_MASK 0x0002
/* info result */
#define L2CAP_IR_SUCCESS 0x0000
#define L2CAP_IR_NOTSUPP 0x0001
/* ----- L2CAP connections ----- */
struct l2cap_chan_list {
struct sock *head;
......
......@@ -149,7 +149,9 @@ void dst_release(struct dst_entry * dst)
{
if (dst) {
if (atomic_read(&dst->__refcnt) < 1)
printk(dst_underflow_bug_msg, dst, current_text_addr());
printk(dst_underflow_bug_msg,
atomic_read(&dst->__refcnt),
dst, current_text_addr());
atomic_dec(&dst->__refcnt);
}
}
......
......@@ -252,7 +252,7 @@ tcf_hash_create(struct tc_st *parm, struct rtattr *est, struct tc_action *a, int
}
spin_lock_init(&p->lock);
p->stats.lock = &p->lock;
p->stats_lock = &p->lock;
p->index = parm->index ? : tcf_hash_new_index();
p->tm.install = jiffies;
p->tm.lastuse = jiffies;
......
......@@ -917,25 +917,8 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
write_unlock_bh(&sk->sk_callback_lock);
}
static inline int sock_i_uid(struct sock *sk)
{
int uid;
read_lock(&sk->sk_callback_lock);
uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
read_unlock(&sk->sk_callback_lock);
return uid;
}
static inline unsigned long sock_i_ino(struct sock *sk)
{
unsigned long ino;
read_lock(&sk->sk_callback_lock);
ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
read_unlock(&sk->sk_callback_lock);
return ino;
}
extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
......@@ -1219,7 +1202,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
/*
* Default write policy as shown to user space via poll/select/SIGIO
*/
static inline int sock_writeable(struct sock *sk)
static inline int sock_writeable(const struct sock *sk)
{
return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}
......@@ -1229,17 +1212,17 @@ static inline int gfp_any(void)
return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
static inline long sock_rcvtimeo(struct sock *sk, int noblock)
static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
return noblock ? 0 : sk->sk_rcvtimeo;
}
static inline long sock_sndtimeo(struct sock *sk, int noblock)
static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
return noblock ? 0 : sk->sk_sndtimeo;
}
static inline int sock_rcvlowat(struct sock *sk, int waitall, int len)
static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}
......
......@@ -272,20 +272,20 @@ static __inline__ int tw_del_dead_node(struct tcp_tw_bucket *tw)
#define tcptw_sk(__sk) ((struct tcp_tw_bucket *)(__sk))
static inline const u32 tcp_v4_rcv_saddr(const struct sock *sk)
static inline u32 tcp_v4_rcv_saddr(const struct sock *sk)
{
return likely(sk->sk_state != TCP_TIME_WAIT) ?
inet_sk(sk)->rcv_saddr : tcptw_sk(sk)->tw_rcv_saddr;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static inline const struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk)
static inline struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk)
{
return likely(sk->sk_state != TCP_TIME_WAIT) ?
&inet6_sk(sk)->rcv_saddr : &tcptw_sk(sk)->tw_v6_rcv_saddr;
}
static inline const struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
{
return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
}
......
......@@ -818,6 +818,7 @@ extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
extern int xfrm_check_selectors(struct xfrm_state **x, int n, struct flowi *fl);
extern int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm4_rcv(struct sk_buff *skb);
extern int xfrm4_output(struct sk_buff **pskb);
extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler);
extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler);
extern int xfrm4_tunnel_check_size(struct sk_buff *skb);
......
......@@ -57,7 +57,7 @@
#define BT_DBG(D...)
#endif
#define VERSION "2.2"
#define VERSION "2.3"
static struct proto_ops l2cap_sock_ops;
......@@ -718,8 +718,7 @@ static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
return err;
}
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
int err = 0;
......@@ -1444,7 +1443,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
u16 scid, dcid, result, status;
struct sock *sk;
char req[128];
u8 req[128];
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
......@@ -1481,7 +1480,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
{
struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
u16 dcid, flags;
u8 rsp[64];
u8 rsp[64];
struct sock *sk;
int result;
......@@ -1633,6 +1632,35 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
return 0;
}
static inline int l2cap_info_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
struct l2cap_info_req *req = (struct l2cap_info_req *) data;
struct l2cap_info_rsp rsp;
u16 type;
type = __le16_to_cpu(req->type);
BT_DBG("type 0x%4.4x", type);
rsp.type = __cpu_to_le16(type);
rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
l2cap_send_rsp(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
return 0;
}
static inline int l2cap_info_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
u16 type, result;
type = __le16_to_cpu(rsp->type);
result = __le16_to_cpu(rsp->result);
BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
return 0;
}
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
u8 *data = skb->data;
......@@ -1657,6 +1685,10 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
}
switch (cmd.code) {
case L2CAP_COMMAND_REJ:
/* FIXME: We should process this */
break;
case L2CAP_CONN_REQ:
err = l2cap_connect_req(conn, &cmd, data);
break;
......@@ -1681,17 +1713,19 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
err = l2cap_disconnect_rsp(conn, &cmd, data);
break;
case L2CAP_COMMAND_REJ:
/* FIXME: We should process this */
break;
case L2CAP_ECHO_REQ:
l2cap_send_rsp(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
break;
case L2CAP_ECHO_RSP:
break;
case L2CAP_INFO_REQ:
err = l2cap_info_req(conn, &cmd, data);
break;
case L2CAP_INFO_RSP:
err = l2cap_info_rsp(conn, &cmd, data);
break;
default:
......@@ -1704,7 +1738,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
struct l2cap_cmd_rej rej;
BT_DBG("error %d", err);
/* FIXME: Map err to a valid reason. */
/* FIXME: Map err to a valid reason */
rej.reason = __cpu_to_le16(0);
l2cap_send_rsp(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
......@@ -1737,7 +1771,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
/* If socket recv buffers overflows we drop data here
* which is *bad* because L2CAP has to be reliable.
* But we don't have any other choice. L2CAP doesn't
* provide flow control mechanism */
* provide flow control mechanism. */
if (!sock_queue_rcv_skb(sk, skb))
goto done;
......@@ -2210,7 +2244,7 @@ EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
......
......@@ -89,6 +89,15 @@ static int br_dev_stop(struct net_device *dev)
return 0;
}
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < 68) || new_mtu > br_min_mtu(dev->priv))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
static int br_dev_accept_fastpath(struct net_device *dev, struct dst_entry *dst)
{
return -1;
......@@ -105,6 +114,7 @@ void br_dev_setup(struct net_device *dev)
dev->hard_start_xmit = br_dev_xmit;
dev->open = br_dev_open;
dev->set_multicast_list = br_dev_set_multicast_list;
dev->change_mtu = br_change_mtu;
dev->destructor = free_netdev;
SET_MODULE_OWNER(dev);
dev->stop = br_dev_stop;
......
......@@ -23,6 +23,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
const struct sk_buff *skb)
{
if (skb->dev == p->dev ||
skb->len > p->dev->mtu ||
p->state != BR_STATE_FORWARDING)
return 0;
......
......@@ -295,6 +295,24 @@ int br_del_bridge(const char *name)
return ret;
}
int br_min_mtu(const struct net_bridge *br)
{
const struct net_bridge_port *p;
int mtu = 0;
ASSERT_RTNL();
if (list_empty(&br->port_list))
mtu = 1500;
else {
list_for_each_entry(p, &br->port_list, list) {
if (!mtu || p->dev->mtu < mtu)
mtu = p->dev->mtu;
}
}
return mtu;
}
/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev)
{
......@@ -328,6 +346,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if ((br->dev->flags & IFF_UP) && (dev->flags & IFF_UP))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
br->dev->mtu = br_min_mtu(br);
}
return err;
......
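br_min_mtu() pairs with the new should_deliver() length test: the bridge device advertises the smallest member MTU so the stack never hands it a frame some port would have to drop; a bridge over a 1500-byte Ethernet port and a 1492-byte PPPoE-limited port, for instance, ends up at 1492. A toy sketch of the same scan over a plain array (names hypothetical):

/* Toy br_min_mtu(): an empty bridge defaults to 1500. */
static int min_mtu(const int *port_mtu, int nports)
{
	int i, mtu = 0;

	if (nports == 0)
		return 1500;
	for (i = 0; i < nports; i++)
		if (!mtu || port_mtu[i] < mtu)
			mtu = port_mtu[i];
	return mtu;
}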
......@@ -47,6 +47,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
spin_unlock_bh(&br->lock);
break;
case NETDEV_CHANGEMTU:
br->dev->mtu = br_min_mtu(br);
break;
case NETDEV_DOWN:
if (br->dev->flags & IFF_UP) {
spin_lock_bh(&br->lock);
......
......@@ -168,6 +168,7 @@ extern int br_add_if(struct net_bridge *br,
struct net_device *dev);
extern int br_del_if(struct net_bridge *br,
struct net_device *dev);
extern int br_min_mtu(const struct net_bridge *br);
/* br_input.c */
extern int br_handle_frame_finish(struct sk_buff *skb);
......
......@@ -711,6 +711,27 @@ void sock_rfree(struct sk_buff *skb)
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
int sock_i_uid(struct sock *sk)
{
int uid;
read_lock(&sk->sk_callback_lock);
uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
read_unlock(&sk->sk_callback_lock);
return uid;
}
unsigned long sock_i_ino(struct sock *sk)
{
unsigned long ino;
read_lock(&sk->sk_callback_lock);
ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
read_unlock(&sk->sk_callback_lock);
return ino;
}
/*
* Allocate a skb from the socket's send buffer.
*/
......@@ -1379,6 +1400,8 @@ EXPORT_SYMBOL(sock_rmalloc);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino);
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_optmem_max);
EXPORT_SYMBOL(sysctl_rmem_max);
......
......@@ -196,8 +196,7 @@ config NET_IPIP
can be useful if you want to make your (or some other) machine
appear on a different network than it physically is, or to use
mobile-IP facilities (allowing laptops to seamlessly move between
networks without changing their IP addresses; check out
<http://anchor.cs.binghamton.edu/~mobileip/LJ/index.html>).
networks without changing their IP addresses).
Saying Y to this option will produce two modules ( = code which can
be inserted in and removed from the running kernel whenever you
......
......@@ -23,4 +23,5 @@ obj-$(CONFIG_IP_PNP) += ipconfig.o
obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_IP_VS) += ipvs/
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o xfrm4_tunnel.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
xfrm4_tunnel.o xfrm4_output.o
......@@ -67,52 +67,24 @@ static int ah_output(struct sk_buff **pskb)
char buf[60];
} tmp_iph;
if ((*pskb)->ip_summed == CHECKSUM_HW) {
err = skb_checksum_help(pskb, 0);
if (err)
goto error_nolock;
}
top_iph = (*pskb)->nh.iph;
iph = &tmp_iph.iph;
spin_lock_bh(&x->lock);
err = xfrm_state_check(x, *pskb);
if (err)
goto error;
iph->tos = top_iph->tos;
iph->ttl = top_iph->ttl;
iph->frag_off = top_iph->frag_off;
iph->daddr = top_iph->daddr;
iph = (*pskb)->nh.iph;
if (x->props.mode) {
err = xfrm4_tunnel_check_size(*pskb);
if (top_iph->ihl != 5) {
memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
if (err)
goto error;
top_iph = (struct iphdr*)skb_push(*pskb, x->props.header_len);
top_iph->ihl = 5;
top_iph->version = 4;
top_iph->tos = iph->tos;
if (x->props.flags & XFRM_STATE_NOECN)
IP_ECN_clear(top_iph);
top_iph->frag_off = iph->frag_off & ~htons(IP_MF|IP_OFFSET);
if (!(iph->frag_off&htons(IP_DF)))
__ip_select_ident(top_iph, dst, 0);
top_iph->ttl = iph->ttl;
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
memcpy(&tmp_iph, top_iph, 20);
memset(&(IPCB(*pskb)->opt), 0, sizeof(struct ip_options));
ah = (struct ip_auth_hdr*)(top_iph+1);
ah->nexthdr = IPPROTO_IPIP;
} else {
memcpy(&tmp_iph, (*pskb)->data, iph->ihl*4);
top_iph = (struct iphdr*)skb_push(*pskb, x->props.header_len);
memcpy(top_iph, &tmp_iph, iph->ihl*4);
if (top_iph->ihl != 5) {
err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
if (err)
goto error;
}
ah = (struct ip_auth_hdr*)((char*)top_iph+iph->ihl*4);
ah->nexthdr = iph->protocol;
}
iph = &tmp_iph.iph;
ah = (struct ip_auth_hdr *)((char *)top_iph+top_iph->ihl*4);
ah->nexthdr = top_iph->protocol;
top_iph->tos = 0;
top_iph->tot_len = htons((*pskb)->len);
top_iph->frag_off = 0;
......@@ -128,31 +100,19 @@ static int ah_output(struct sk_buff **pskb)
ah->spi = x->id.spi;
ah->seq_no = htonl(++x->replay.oseq);
ahp->icv(ahp, *pskb, ah->auth_data);
top_iph->tos = iph->tos;
top_iph->ttl = iph->ttl;
top_iph->frag_off = iph->frag_off;
if (!x->props.mode) {
top_iph->daddr = iph->daddr;
if (iph->ihl != 5)
memcpy(top_iph+1, iph+1, iph->ihl*4 - sizeof(struct iphdr));
}
ip_send_check(top_iph);
top_iph->daddr = iph->daddr;
if (top_iph->ihl != 5)
memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
(*pskb)->nh.raw = (*pskb)->data;
ip_send_check(top_iph);
x->curlft.bytes += (*pskb)->len;
x->curlft.packets++;
spin_unlock_bh(&x->lock);
if (((*pskb)->dst = dst_pop(dst)) == NULL) {
err = -EHOSTUNREACH;
goto error_nolock;
}
return NET_XMIT_BYPASS;
err = 0;
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(*pskb);
return err;
}
......
......@@ -23,7 +23,7 @@ int esp_output(struct sk_buff **pskb)
int err;
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
struct iphdr *iph, *top_iph;
struct iphdr *top_iph;
struct ip_esp_hdr *esph;
struct crypto_tfm *tfm;
struct esp_data *esp;
......@@ -32,32 +32,9 @@ int esp_output(struct sk_buff **pskb)
int clen;
int alen;
int nfrags;
union {
struct iphdr iph;
char buf[60];
} tmp_iph;
if ((*pskb)->ip_summed == CHECKSUM_HW) {
err = skb_checksum_help(pskb, 0);
if (err)
goto error_nolock;
}
spin_lock_bh(&x->lock);
err = xfrm_state_check(x, *pskb);
if (err)
goto error;
if (x->props.mode) {
err = xfrm4_tunnel_check_size(*pskb);
if (err)
goto error;
} else {
/* Strip IP header in transport mode. Save it. */
iph = (*pskb)->nh.iph;
memcpy(&tmp_iph, iph, iph->ihl*4);
__skb_pull(*pskb, iph->ihl*4);
}
/* Strip IP+ESP header. */
__skb_pull(*pskb, (*pskb)->h.raw - (*pskb)->data);
/* Now skb is pure payload to encrypt */
err = -ENOMEM;
......@@ -85,35 +62,11 @@ int esp_output(struct sk_buff **pskb)
*(u8*)(trailer->tail + clen-(*pskb)->len - 2) = (clen - (*pskb)->len)-2;
pskb_put(*pskb, trailer, clen - (*pskb)->len);
iph = (*pskb)->nh.iph;
if (x->props.mode) {
top_iph = (struct iphdr*)skb_push(*pskb, x->props.header_len);
esph = (struct ip_esp_hdr*)(top_iph+1);
*(u8*)(trailer->tail - 1) = IPPROTO_IPIP;
top_iph->ihl = 5;
top_iph->version = 4;
top_iph->tos = iph->tos; /* DS disclosed */
if (x->props.flags & XFRM_STATE_NOECN)
IP_ECN_clear(top_iph);
top_iph->tot_len = htons((*pskb)->len + alen);
top_iph->frag_off = iph->frag_off&htons(IP_DF);
if (!(top_iph->frag_off))
ip_select_ident(top_iph, dst, NULL);
top_iph->ttl = iph->ttl; /* TTL disclosed */
top_iph->check = 0;
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
memset(&(IPCB(*pskb)->opt), 0, sizeof(struct ip_options));
} else {
esph = (struct ip_esp_hdr*)skb_push(*pskb, x->props.header_len);
top_iph = (struct iphdr*)skb_push(*pskb, iph->ihl*4);
memcpy(top_iph, &tmp_iph, iph->ihl*4);
iph = &tmp_iph.iph;
top_iph->tot_len = htons((*pskb)->len + alen);
top_iph->check = 0;
top_iph->frag_off = iph->frag_off;
*(u8*)(trailer->tail - 1) = iph->protocol;
}
__skb_push(*pskb, (*pskb)->data - (*pskb)->nh.raw);
top_iph = (*pskb)->nh.iph;
esph = (struct ip_esp_hdr *)((*pskb)->nh.raw + top_iph->ihl*4);
top_iph->tot_len = htons((*pskb)->len + alen);
*(u8*)(trailer->tail - 1) = top_iph->protocol;
/* this is non-NULL only with UDP Encapsulation */
if (x->encap) {
......@@ -124,7 +77,7 @@ int esp_output(struct sk_buff **pskb)
uh = (struct udphdr *)esph;
uh->source = encap->encap_sport;
uh->dest = encap->encap_dport;
uh->len = htons((*pskb)->len + alen - sizeof(struct iphdr));
uh->len = htons((*pskb)->len + alen - top_iph->ihl*4);
uh->check = 0;
switch (encap->encap_type) {
......@@ -176,21 +129,9 @@ int esp_output(struct sk_buff **pskb)
ip_send_check(top_iph);
(*pskb)->nh.raw = (*pskb)->data;
x->curlft.bytes += (*pskb)->len;
x->curlft.packets++;
spin_unlock_bh(&x->lock);
if (((*pskb)->dst = dst_pop(dst)) == NULL) {
err = -EHOSTUNREACH;
goto error_nolock;
}
return NET_XMIT_BYPASS;
err = 0;
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(*pskb);
return err;
}
......
......@@ -498,10 +498,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
skb_headroom(frag) < hlen)
goto slow_path;
/* Correct socket ownership. */
if (frag->sk == NULL && skb->sk)
goto slow_path;
/* Partially cloned skb? */
if (skb_shared(frag))
goto slow_path;
......@@ -1113,12 +1109,10 @@ int ip_push_pending_frames(struct sock *sk)
tail_skb = &(tmp_skb->next);
skb->len += tmp_skb->len;
skb->data_len += tmp_skb->len;
#if 0 /* Logically correct, but useless work, ip_fragment() will have to undo */
skb->truesize += tmp_skb->truesize;
__sock_put(tmp_skb->sk);
tmp_skb->destructor = NULL;
tmp_skb->sk = NULL;
#endif
}
/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
......
......@@ -121,28 +121,6 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
return err;
}
static void ipcomp_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
struct iphdr *iph, *top_iph;
iph = skb->nh.iph;
top_iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));
top_iph->ihl = 5;
top_iph->version = 4;
top_iph->tos = iph->tos;
top_iph->tot_len = htons(skb->len);
if (!(iph->frag_off&htons(IP_DF)))
__ip_select_ident(top_iph, dst, 0);
top_iph->ttl = iph->ttl;
top_iph->check = 0;
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
top_iph->frag_off = iph->frag_off&~htons(IP_MF|IP_OFFSET);
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
skb->nh.raw = skb->data;
}
static int ipcomp_output(struct sk_buff **pskb)
{
int err;
......@@ -153,39 +131,17 @@ static int ipcomp_output(struct sk_buff **pskb)
struct ipcomp_data *ipcd = x->data;
int hdr_len = 0;
if ((*pskb)->ip_summed == CHECKSUM_HW) {
err = skb_checksum_help(pskb, 0);
if (err)
goto error_nolock;
}
spin_lock_bh(&x->lock);
err = xfrm_state_check(x, *pskb);
if (err)
goto error;
if (x->props.mode) {
err = xfrm4_tunnel_check_size(*pskb);
if (err)
goto error;
} else {
/* Don't bother compressing */
iph = (*pskb)->nh.iph;
hdr_len = iph->ihl * 4;
}
iph = (*pskb)->nh.iph;
iph->tot_len = htons((*pskb)->len);
hdr_len = iph->ihl * 4;
if (((*pskb)->len - hdr_len) < ipcd->threshold) {
/* Don't bother compressing */
if (x->props.mode) {
ipcomp_tunnel_encap(x, *pskb);
iph = (*pskb)->nh.iph;
iph->protocol = IPPROTO_IPIP;
ip_send_check(iph);
}
goto out_ok;
}
if (x->props.mode)
ipcomp_tunnel_encap(x, *pskb);
if ((skb_is_nonlinear(*pskb) || skb_cloned(*pskb)) &&
skb_linearize(*pskb, GFP_ATOMIC) != 0) {
err = -ENOMEM;
......@@ -197,7 +153,6 @@ static int ipcomp_output(struct sk_buff **pskb)
if (err == -EMSGSIZE) {
if (x->props.mode) {
iph = (*pskb)->nh.iph;
iph->protocol = IPPROTO_IPIP;
ip_send_check(iph);
}
goto out_ok;
......@@ -207,36 +162,19 @@ static int ipcomp_output(struct sk_buff **pskb)
/* Install ipcomp header, convert into ipcomp datagram. */
iph = (*pskb)->nh.iph;
if (x->props.mode && (x->props.flags & XFRM_STATE_NOECN))
IP_ECN_clear(iph);
iph->tot_len = htons((*pskb)->len);
iph->check = 0;
ipch = (struct ip_comp_hdr *)((char *)iph + iph->ihl * 4);
ipch->nexthdr = x->props.mode ? IPPROTO_IPIP : iph->protocol;
ipch->nexthdr = iph->protocol;
ipch->flags = 0;
ipch->cpi = htons((u16)ntohl(x->id.spi));
iph->protocol = IPPROTO_COMP;
ip_send_check(iph);
(*pskb)->nh.raw = (*pskb)->data;
out_ok:
x->curlft.bytes += (*pskb)->len;
x->curlft.packets++;
spin_unlock_bh(&x->lock);
if (((*pskb)->dst = dst_pop(dst)) == NULL) {
err = -EHOSTUNREACH;
goto error_nolock;
}
err = NET_XMIT_BYPASS;
err = 0;
out_exit:
return err;
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(*pskb);
goto out_exit;
return err;
}
static void ipcomp4_err(struct sk_buff *skb, u32 info)
......
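The CPI assignment retained above is worth unpacking: IPComp's Compression Parameter Index is only 16 bits, and the kernel keeps it in the low-order half of the 32-bit xfrm SPI, so htons((u16)ntohl(spi)) simply re-encodes those low bits in network order. A worked userspace example under that assumption:

#include <arpa/inet.h>
#include <assert.h>

int main(void)
{
	unsigned int spi = htonl(0x00001234);	/* SPI as stored: network order */
	unsigned short cpi = htons((unsigned short)ntohl(spi));

	assert(ntohs(cpi) == 0x1234);		/* CPI = low 16 bits of the SPI */
	return 0;
}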
......@@ -109,7 +109,9 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol;
#endif
static struct timer_list ipmr_expire_timer;
......@@ -1702,7 +1704,7 @@ static struct file_operations ipmr_vif_fops = {
.open = ipmr_vif_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
.release = seq_release_private,
};
struct ipmr_mfc_iter {
......@@ -1737,6 +1739,9 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ipmr_mfc_iter *it = seq->private;
it->cache = NULL;
it->ct = 0;
return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
......@@ -1846,7 +1851,6 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file)
if (rc)
goto out_kfree;
memset(s, 0, sizeof(*s));
seq = file->private_data;
seq->private = s;
out:
......@@ -1862,7 +1866,7 @@ static struct file_operations ipmr_mfc_fops = {
.open = ipmr_mfc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
.release = seq_release_private,
};
#endif
......
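The .release changes in this file (and the matching switches in route.c and the irda proc code later in this merge) all enforce one pairing rule: an open that hangs allocated state off seq->private must release with seq_release_private(), which frees it, while an open that allocates nothing must use plain seq_release(), or memory leaks (or is freed twice). A minimal sketch of the allocating side, with a hypothetical iterator struct and the seq_operations assumed defined elsewhere:

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_iter { int pos; };			/* hypothetical per-open state */
static struct seq_operations my_seq_ops;	/* start/next/stop/show omitted */

static int my_open(struct inode *inode, struct file *file)
{
	struct my_iter *it = kmalloc(sizeof(*it), GFP_KERNEL);
	int rc;

	if (!it)
		return -ENOMEM;
	rc = seq_open(file, &my_seq_ops);
	if (rc) {
		kfree(it);
		return rc;
	}
	memset(it, 0, sizeof(*it));
	((struct seq_file *)file->private_data)->private = it;
	return 0;
}

/* .release must then be seq_release_private, which frees seq->private. */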
......@@ -76,7 +76,7 @@ static void dump_packet(const struct ipt_log_info *info,
printk("FRAG:%u ", ntohs(iph.frag_off) & IP_OFFSET);
if ((info->logflags & IPT_LOG_IPOPT)
&& iph.ihl * 4 != sizeof(struct iphdr)) {
&& iph.ihl * 4 > sizeof(struct iphdr)) {
unsigned char opt[4 * 15 - sizeof(struct iphdr)];
unsigned int i, optsize;
......@@ -143,7 +143,7 @@ static void dump_packet(const struct ipt_log_info *info,
printk("URGP=%u ", ntohs(tcph.urg_ptr));
if ((info->logflags & IPT_LOG_TCPOPT)
&& tcph.doff * 4 != sizeof(struct tcphdr)) {
&& tcph.doff * 4 > sizeof(struct tcphdr)) {
unsigned char opt[4 * 15 - sizeof(struct tcphdr)];
unsigned int i, optsize;
......
......@@ -217,6 +217,10 @@ static void ipt_ulog_packet(unsigned int hooknum,
pm = NLMSG_DATA(nlh);
/* We might not have a timestamp, get one */
if (skb->stamp.tv_sec == 0)
do_gettimeofday((struct timeval *)&skb->stamp);
/* copy hook, prefix, timestamp, payload, etc. */
pm->data_len = copy_len;
pm->timestamp_sec = skb->stamp.tv_sec;
......
......@@ -432,7 +432,7 @@ static struct file_operations rt_cpu_seq_fops = {
.open = rt_cpu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
......
......@@ -682,17 +682,32 @@ u32 __tcp_select_window(struct sock *sk)
if (free_space > tp->rcv_ssthresh)
free_space = tp->rcv_ssthresh;
/* Get the largest window that is a nice multiple of mss.
* Window clamp already applied above.
* If our current window offering is within 1 mss of the
* free space we just keep it. This prevents the divide
* and multiply from happening most of the time.
* We also don't do any window rounding when the free space
* is too small.
/* Don't do rounding if we are using window scaling, since the
* scaled window will not line up with the MSS boundary anyway.
*/
window = tp->rcv_wnd;
if (window <= free_space - mss || window > free_space)
window = (free_space/mss)*mss;
if (tp->rcv_wscale) {
window = free_space;
/* Advertise enough space so that it won't get scaled away.
* Important case: prevent zero window announcement if
* 1<<rcv_wscale > mss.
*/
if (((window >> tp->rcv_wscale) << tp->rcv_wscale) != window)
window = (((window >> tp->rcv_wscale) + 1)
<< tp->rcv_wscale);
} else {
/* Get the largest window that is a nice multiple of mss.
* Window clamp already applied above.
* If our current window offering is within 1 mss of the
* free space we just keep it. This prevents the divide
* and multiply from happening most of the time.
* We also don't do any window rounding when the free space
* is too small.
*/
if (window <= free_space - mss || window > free_space)
window = (free_space/mss)*mss;
}
return window;
}
......
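The scaled branch rounds the advertisement up to the next multiple of 1<<rcv_wscale because the window field actually sent on the wire is window >> rcv_wscale, and truncation could otherwise announce zero. Worked numbers: with rcv_wscale = 7 and free_space = 100, the shift alone gives 100 >> 7 = 0, while rounding up yields 128. A standalone sketch of the computation:

/* Round a window up so it survives the >> wscale on the wire,
 * per the branch above. */
static unsigned int scale_window(unsigned int free_space, int wscale)
{
	unsigned int window = free_space;

	if (((window >> wscale) << wscale) != window)
		window = ((window >> wscale) + 1) << wscale;
	return window;
}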
/*
* xfrm4_output.c - Common IPsec encapsulation code for IPv4.
* Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
/* Add encapsulation header.
*
* In transport mode, the IP header will be moved forward to make space
* for the encapsulation header.
*
* In tunnel mode, the top IP header will be constructed per RFC 2401.
* The following fields in it shall be filled in by x->type->output:
* tot_len
* check
*
* On exit, skb->h will be set to the start of the payload to be processed
* by x->type->output and skb->nh will be set to the top IP header.
*/
static void xfrm4_encap(struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
struct xfrm_state *x = dst->xfrm;
struct iphdr *iph, *top_iph;
iph = skb->nh.iph;
skb->h.ipiph = iph;
skb->nh.raw = skb_push(skb, x->props.header_len);
top_iph = skb->nh.iph;
if (!x->props.mode) {
skb->h.raw += iph->ihl*4;
memmove(top_iph, iph, iph->ihl*4);
return;
}
top_iph->ihl = 5;
top_iph->version = 4;
/* DS disclosed */
top_iph->tos = INET_ECN_encapsulate(iph->tos, iph->tos);
if (x->props.flags & XFRM_STATE_NOECN)
IP_ECN_clear(top_iph);
top_iph->frag_off = iph->frag_off & htons(IP_DF);
if (!top_iph->frag_off)
__ip_select_ident(top_iph, dst, 0);
/* TTL disclosed */
top_iph->ttl = iph->ttl;
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
top_iph->protocol = IPPROTO_IPIP;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
}
int xfrm4_output(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct dst_entry *dst = skb->dst;
struct xfrm_state *x = dst->xfrm;
int err;
if (skb->ip_summed == CHECKSUM_HW) {
err = skb_checksum_help(pskb, 0);
skb = *pskb;
if (err)
goto error_nolock;
}
spin_lock_bh(&x->lock);
err = xfrm_state_check(x, skb);
if (err)
goto error;
if (x->props.mode) {
err = xfrm4_tunnel_check_size(skb);
if (err)
goto error;
}
xfrm4_encap(skb);
err = x->type->output(pskb);
skb = *pskb;
if (err)
goto error;
x->curlft.bytes += skb->len;
x->curlft.packets++;
spin_unlock_bh(&x->lock);
if (!(skb->dst = dst_pop(dst))) {
err = -EHOSTUNREACH;
goto error_nolock;
}
err = NET_XMIT_BYPASS;
out_exit:
return err;
error:
spin_unlock_bh(&x->lock);
error_nolock:
kfree_skb(skb);
goto out_exit;
}
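With checksum help, state checking, tunnel size checks, header construction, lifetime accounting and dst popping all centralized here, an x->type->output method shrinks to the header-specific leftovers the comment above names (tot_len and check); the simplified ipip_output() in the next hunk is the degenerate case. A minimal sketch of the shape such a method now takes:

#include <linux/skbuff.h>
#include <net/ip.h>

/* Sketch of a post-split type->output: xfrm4_encap() has already
 * built skb->nh; only tot_len and the checksum remain. */
static int toy_type_output(struct sk_buff **pskb)
{
	struct iphdr *iph = (*pskb)->nh.iph;

	iph->tot_len = htons((*pskb)->len);
	ip_send_check(iph);	/* recompute the IP header checksum */
	return 0;
}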
......@@ -139,7 +139,7 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
/* Copy neighbour for reachability confirmation */
dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
dst_prev->input = rt->u.dst.input;
dst_prev->output = dst_prev->xfrm->type->output;
dst_prev->output = xfrm4_output;
if (rt->peer)
atomic_inc(&rt->peer->refcnt);
x->u.rt.peer = rt->peer;
......
......@@ -36,52 +36,13 @@ int xfrm4_tunnel_check_size(struct sk_buff *skb)
static int ipip_output(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct dst_entry *dst = skb->dst;
struct xfrm_state *x = dst->xfrm;
struct iphdr *iph, *top_iph;
int tos, err;
if ((err = xfrm4_tunnel_check_size(skb)) != 0)
goto error_nolock;
struct iphdr *iph;
iph = skb->nh.iph;
iph->tot_len = htons(skb->len);
ip_send_check(iph);
spin_lock_bh(&x->lock);
tos = iph->tos;
top_iph = (struct iphdr *) skb_push(skb, x->props.header_len);
top_iph->ihl = 5;
top_iph->version = 4;
top_iph->tos = INET_ECN_encapsulate(tos, iph->tos);
top_iph->tot_len = htons(skb->len);
top_iph->frag_off = iph->frag_off & ~htons(IP_MF|IP_OFFSET);
if (!(iph->frag_off & htons(IP_DF)))
__ip_select_ident(top_iph, dst, 0);
top_iph->ttl = iph->ttl;
top_iph->protocol = IPPROTO_IPIP;
top_iph->check = 0;
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
ip_send_check(top_iph);
skb->nh.raw = skb->data;
x->curlft.bytes += skb->len;
x->curlft.packets++;
spin_unlock_bh(&x->lock);
if ((skb->dst = dst_pop(dst)) == NULL) {
kfree_skb(skb);
err = -EHOSTUNREACH;
goto error_nolock;
}
return NET_XMIT_BYPASS;
error_nolock:
kfree_skb(skb);
return err;
return 0;
}
static int ipip_xfrm_rcv(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
......
......@@ -123,7 +123,7 @@ static int ipcomp6_output(struct sk_buff **pskb)
int err;
struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
struct ipv6hdr *tmp_iph = NULL, *iph, *top_iph;
struct ipv6hdr *iph, *top_iph;
int hdr_len = 0;
struct ipv6_comp_hdr *ipch;
struct ipcomp_data *ipcd = x->data;
......@@ -193,19 +193,11 @@ static int ipcomp6_output(struct sk_buff **pskb)
if ((dlen + sizeof(struct ipv6_comp_hdr)) >= plen) {
goto out_ok;
}
memcpy(start, scratch, dlen);
pskb_trim(*pskb, hdr_len+dlen);
memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
pskb_trim(*pskb, hdr_len + dlen + sizeof(struct ip_comp_hdr));
/* insert ipcomp header and replace datagram */
tmp_iph = kmalloc(hdr_len, GFP_ATOMIC);
if (!tmp_iph) {
err = -ENOMEM;
goto error;
}
memcpy(tmp_iph, (*pskb)->nh.raw, hdr_len);
top_iph = (struct ipv6hdr*)skb_push(*pskb, sizeof(struct ipv6_comp_hdr));
memcpy(top_iph, tmp_iph, hdr_len);
kfree(tmp_iph);
top_iph = (*pskb)->nh.ipv6h;
if (x->props.mode && (x->props.flags & XFRM_STATE_NOECN))
IP6_ECN_clear(top_iph);
......@@ -358,7 +350,7 @@ static int ipcomp6_init_state(struct xfrm_state *x, void *args)
goto error;
memset(ipcd, 0, sizeof(*ipcd));
x->props.header_len = sizeof(struct ipv6_comp_hdr);
x->props.header_len = 0;
if (x->props.mode)
x->props.header_len += sizeof(struct ipv6hdr);
......
......@@ -449,6 +449,6 @@ struct file_operations discovery_seq_fops = {
.open = discovery_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
.release = seq_release,
};
#endif
......@@ -62,7 +62,7 @@ static struct file_operations ircomm_proc_fops = {
.open = ircomm_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
......
......@@ -1093,7 +1093,7 @@ struct file_operations irias_seq_fops = {
.open = irias_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
.release = seq_release,
};
#endif /* PROC_FS */
......@@ -49,21 +49,6 @@ config NET_SCH_HFSC
To compile this code as a module, choose M here: the
module will be called sch_hfsc.
config NET_SCH_CSZ
tristate "CSZ packet scheduler"
depends on NET_SCHED
---help---
Say Y here if you want to use the Clark-Shenker-Zhang (CSZ) packet
scheduling algorithm for some of your network devices. At the
moment, this is the only algorithm that can guarantee service for
real-time applications (see the top of <file:net/sched/sch_csz.c>
for details and references about the algorithm).
Note: this scheduler is currently broken.
To compile this code as a module, choose M here: the
module will be called sch_csz.
#tristate ' H-PFQ packet scheduler' CONFIG_NET_SCH_HPFQ
config NET_SCH_ATM
tristate "ATM pseudo-scheduler"
......
......@@ -12,7 +12,6 @@ obj-$(CONFIG_NET_ACT_POLICE) += police.o
obj-$(CONFIG_NET_CLS_POLICE) += police.o
obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
obj-$(CONFIG_NET_SCH_CSZ) += sch_csz.o
obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o
obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o
obj-$(CONFIG_NET_SCH_RED) += sch_red.o
......
......@@ -78,7 +78,7 @@ void sctp_inq_free(struct sctp_inq *queue)
struct sctp_chunk *chunk;
/* Empty the queue. */
while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in)))
while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in)) != NULL)
sctp_chunk_free(chunk);
/* If there is a packet which is currently being worked on,
......
......@@ -133,7 +133,7 @@ void sctp_packet_free(struct sctp_packet *packet)
SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);
while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)))
while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL)
sctp_chunk_free(chunk);
if (packet->malloced)
......@@ -370,7 +370,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
* [This whole comment explains WORD_ROUND() below.]
*/
SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");
while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) {
while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) {
if (sctp_chunk_is_data(chunk)) {
if (!chunk->has_tsn) {
......@@ -511,7 +511,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
* will get resent or dropped later.
*/
while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) {
while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) {
if (!sctp_chunk_is_data(chunk))
sctp_chunk_free(chunk);
}
......
......@@ -245,7 +245,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
/* Throw away unacknowledged chunks. */
list_for_each(pos, &q->asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport, transports);
while ((lchunk = sctp_list_dequeue(&transport->transmitted))) {
while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
chunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
/* Mark as part of a failed message. */
......@@ -282,7 +282,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
}
/* Throw away any leftover data chunks. */
while ((chunk = sctp_outq_dequeue_data(q))) {
while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
/* Mark as send failure. */
sctp_chunk_fail(chunk, q->error);
......@@ -292,7 +292,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
q->error = 0;
/* Throw away any leftover control chunks. */
while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)))
while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL)
sctp_chunk_free(chunk);
}
......@@ -681,7 +681,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
*/
queue = &q->control;
while ((chunk = (struct sctp_chunk *)skb_dequeue(queue))) {
while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) {
/* Pick the right transport to use. */
new_transport = chunk->transport;
......@@ -812,7 +812,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
start_timer = 0;
queue = &q->out;
while ((chunk = sctp_outq_dequeue_data(q))) {
while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
* stream identifier.
*/
......@@ -866,7 +866,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
"%p skb->users %d.\n",
ntohl(chunk->subh.data_hdr->tsn),
chunk->skb ?chunk->skb->head : 0,
chunk->skb ?chunk->skb->head : NULL,
chunk->skb ?
atomic_read(&chunk->skb->users) : -1);
......
......@@ -101,7 +101,7 @@ __init int sctp_proc_init(void)
{
if (!proc_net_sctp) {
struct proc_dir_entry *ent;
ent = proc_mkdir("net/sctp", 0);
ent = proc_mkdir("net/sctp", NULL);
if (ent) {
ent->owner = THIS_MODULE;
proc_net_sctp = ent;
......
......@@ -995,7 +995,7 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
/* Search through all current addresses and make sure
* we aren't adding any new ones.
*/
new_addr = 0;
new_addr = NULL;
found = 0;
list_for_each(pos, &new_asoc->peer.transport_addr_list) {
......
......@@ -86,8 +86,6 @@
/* Forward declarations for internal helper functions. */
static int sctp_writeable(struct sock *sk);
static inline int sctp_wspace(struct sctp_association *asoc);
static inline void sctp_set_owner_w(struct sctp_chunk *chunk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
size_t msg_len);
......@@ -95,7 +93,8 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static inline int sctp_verify_addr(struct sock *, union sctp_addr *, int);
static struct sctp_af *sctp_sockaddr_af(struct sctp_opt *opt,
union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
......@@ -111,6 +110,64 @@ static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
extern kmem_cache_t *sctp_bucket_cachep;
extern int sctp_assoc_valid(struct sock *sk, struct sctp_association *asoc);
/* Get the sndbuf space available at the time on the association. */
static inline int sctp_wspace(struct sctp_association *asoc)
{
struct sock *sk = asoc->base.sk;
int amt = 0;
amt = sk->sk_sndbuf - asoc->sndbuf_used;
if (amt < 0)
amt = 0;
return amt;
}
/* Increment the used sndbuf space count of the corresponding association by
* the size of the outgoing data chunk.
* Also, set the skb destructor for sndbuf accounting later.
*
* Since it is always 1-1 between chunk and skb, and also a new skb is always
* allocated for chunk bundling in sctp_packet_transmit(), we can use the
* destructor in the data chunk skb for the purpose of the sndbuf space
* tracking.
*/
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
struct sctp_association *asoc = chunk->asoc;
struct sock *sk = asoc->base.sk;
/* The sndbuf space is tracked per association. */
sctp_association_hold(asoc);
chunk->skb->destructor = sctp_wfree;
/* Save the chunk pointer in skb for sctp_wfree to use later. */
*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk);
sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk);
}
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
int len)
{
struct sctp_af *af;
/* Verify basic sockaddr. */
af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
if (!af)
return -EINVAL;
/* Is this a valid SCTP address? */
if (!af->addr_valid(addr, sctp_sk(sk)))
return -EINVAL;
if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
return -EINVAL;
return 0;
}
/* Look up the association by its id. If this is not a UDP-style
* socket, the ID field is always ignored.
*/
......@@ -1008,7 +1065,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct sctp_sndrcvinfo *sinfo;
struct sctp_initmsg *sinit;
sctp_assoc_t associd = NULL;
sctp_cmsgs_t cmsgs = { 0 };
sctp_cmsgs_t cmsgs = { NULL };
int err;
sctp_scope_t scope;
long timeo;
......@@ -4144,64 +4201,6 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
return NULL;
}
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
int len)
{
struct sctp_af *af;
/* Verify basic sockaddr. */
af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
if (!af)
return -EINVAL;
/* Is this a valid SCTP address? */
if (!af->addr_valid(addr, sctp_sk(sk)))
return -EINVAL;
if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
return -EINVAL;
return 0;
}
/* Get the sndbuf space available at the time on the association. */
static inline int sctp_wspace(struct sctp_association *asoc)
{
struct sock *sk = asoc->base.sk;
int amt = 0;
amt = sk->sk_sndbuf - asoc->sndbuf_used;
if (amt < 0)
amt = 0;
return amt;
}
/* Increment the used sndbuf space count of the corresponding association by
* the size of the outgoing data chunk.
* Also, set the skb destructor for sndbuf accounting later.
*
* Since it is always 1-1 between chunk and skb, and also a new skb is always
* allocated for chunk bundling in sctp_packet_transmit(), we can use the
* destructor in the data chunk skb for the purpose of the sndbuf space
* tracking.
*/
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
struct sctp_association *asoc = chunk->asoc;
struct sock *sk = asoc->base.sk;
/* The sndbuf space is tracked per association. */
sctp_association_hold(asoc);
chunk->skb->destructor = sctp_wfree;
/* Save the chunk pointer in skb for sctp_wfree to use later. */
*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk);
sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk);
}
/* If sndbuf has changed, wake up per association sndbuf waiters. */
static void __sctp_write_space(struct sctp_association *asoc)
{
......
......@@ -49,10 +49,10 @@
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
static inline struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *);
static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
struct sctp_ulpevent *);
static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *);
static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
struct sctp_ulpevent *);
/* 1st Level Abstractions */
......@@ -97,12 +97,12 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
struct sk_buff *skb;
struct sctp_ulpevent *event;
while ((skb = __skb_dequeue(&ulpq->lobby))) {
while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
event = sctp_skb2event(skb);
sctp_ulpevent_free(event);
}
while ((skb = __skb_dequeue(&ulpq->reasm))) {
while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
event = sctp_skb2event(skb);
sctp_ulpevent_free(event);
}
......@@ -466,8 +466,8 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
* need reassembling.
*/
static inline struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_ulpevent *retval = NULL;
......@@ -645,8 +645,8 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
}
static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
__u16 sid, ssn;
struct sctp_stream *in;
......@@ -756,7 +756,7 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
tsnmap = &ulpq->asoc->peer.tsn_map;
while ((skb = __skb_dequeue_tail(&ulpq->lobby))) {
while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
freed += skb_headlen(skb);
event = sctp_skb2event(skb);
tsn = event->tsn;
......@@ -782,7 +782,7 @@ static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
tsnmap = &ulpq->asoc->peer.tsn_map;
/* Walk backwards through the list, reneges the newest tsns. */
while ((skb = __skb_dequeue_tail(&ulpq->reasm))) {
while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
freed += skb_headlen(skb);
event = sctp_skb2event(skb);
tsn = event->tsn;
......