Commit 05a15ce5 authored by Steve French

Merge bk://linux.bkbits.net/linux-2.5

into hostme.bitkeeper.com:/repos/c/cifs/linux-2.5cifs
parents 81d10c10 59ddc721
......@@ -1494,6 +1494,8 @@ P: James Morris
M: jmorris@redhat.com
P: Hideaki YOSHIFUJI
M: yoshfuji@linux-ipv6.org
P: Patrick McHardy
M: kaber@coreworks.de
L: netdev@oss.sgi.com
S: Maintained
......
......@@ -149,7 +149,7 @@ asmlinkage int sys_ipc (uint call, int first, int second,
union semun fourth;
if (!ptr)
return -EINVAL;
if (get_user(fourth.__pad, (void * __user *) ptr))
if (get_user(fourth.__pad, (void __user * __user *) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
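Why the double annotation matters: sparse tracks pointer address spaces, and the semun pad pointer here both lives in user memory and points into user memory, so both levels need the tag. A minimal sketch, using the __user definition from <linux/compiler.h> (variable names illustrative):

#ifdef __CHECKER__			/* sparse */
# define __user	__attribute__((noderef, address_space(1)))
#else
# define __user
#endif

void __user *p;			/* kernel variable pointing into user space */
void __user * __user *up;	/* user-space slot holding such a pointer */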
......
......@@ -78,7 +78,7 @@ sys_ipc (uint call, int first, int second, int third, void __user *ptr, long fif
if (!ptr)
break;
if ((ret = verify_area (VERIFY_READ, ptr, sizeof(long)))
|| (ret = get_user(fourth.__pad, (void *__user *)ptr)))
|| (ret = get_user(fourth.__pad, (void __user *__user *)ptr)))
break;
ret = sys_semctl (first, second, third, fourth);
break;
......
......@@ -369,9 +369,9 @@ MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
*/
#define ACE_MINI_SIZE 100
#define ACE_MINI_BUFSIZE (ACE_MINI_SIZE + 2 + 16)
#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 2+4+16)
#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 2+4+16)
#define ACE_MINI_BUFSIZE ACE_MINI_SIZE
#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4)
#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4)
/*
* There seems to be a magic difference in the effect between 995 and 996
......@@ -678,7 +678,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
ringp = &ap->skb->rx_std_skbuff[i];
mapping = pci_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_STD_BUFSIZE - (2 + 16),
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->rx_std_ring[i].size = 0;
......@@ -698,7 +698,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
ringp = &ap->skb->rx_mini_skbuff[i];
mapping = pci_unmap_addr(ringp,mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_MINI_BUFSIZE - (2 + 16),
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->rx_mini_ring[i].size = 0;
......@@ -717,7 +717,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
ringp = &ap->skb->rx_jumbo_skbuff[i];
mapping = pci_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_JUMBO_BUFSIZE - (2 + 16),
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->rx_jumbo_ring[i].size = 0;
......@@ -1257,7 +1257,7 @@ static int __init ace_init(struct net_device *dev)
set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
info->rx_std_ctrl.max_len = ACE_STD_MTU + ETH_HLEN + 4;
info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
info->rx_std_ctrl.flags =
RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
......@@ -1700,17 +1700,14 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
struct rx_desc *rd;
dma_addr_t mapping;
skb = alloc_skb(ACE_STD_BUFSIZE, GFP_ATOMIC);
skb = alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
if (!skb)
break;
/*
* Make sure IP header starts on a fresh cache line.
*/
skb_reserve(skb, 2 + 16);
skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_STD_BUFSIZE - (2 + 16),
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb;
pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
......@@ -1718,7 +1715,7 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
rd = &ap->rx_std_ring[idx];
set_aceaddr(&rd->addr, mapping);
rd->size = ACE_STD_MTU + ETH_HLEN + 4;
rd->size = ACE_STD_BUFSIZE;
rd->idx = idx;
idx = (idx + 1) % RX_STD_RING_ENTRIES;
}
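Why reserving just NET_IP_ALIGN is enough: the constant defaults to 2 in <linux/skbuff.h>, so the 14-byte Ethernet header ends at offset 16 and the IP header starts 4-byte aligned. The older 2 + 16 reserve also pushed the header a full cache line into the buffer, which is what the "fresh cache line" comments refer to. A sketch of the resulting layout, assuming NET_IP_ALIGN == 2:

/*
 * skb_reserve(skb, NET_IP_ALIGN):
 *
 *   data + 0  .. data + 1	padding
 *   data + 2  .. data + 15	14-byte Ethernet header
 *   data + 16 ..		IP header, 4-byte aligned
 */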
......@@ -1766,17 +1763,14 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
struct rx_desc *rd;
dma_addr_t mapping;
skb = alloc_skb(ACE_MINI_BUFSIZE, GFP_ATOMIC);
skb = alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
if (!skb)
break;
/*
* Make sure the IP header ends up on a fresh cache line
*/
skb_reserve(skb, 2 + 16);
skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_MINI_BUFSIZE - (2 + 16),
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_mini_skbuff[idx].skb = skb;
pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
......@@ -1784,7 +1778,7 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
rd = &ap->rx_mini_ring[idx];
set_aceaddr(&rd->addr, mapping);
rd->size = ACE_MINI_SIZE;
rd->size = ACE_MINI_BUFSIZE;
rd->idx = idx;
idx = (idx + 1) % RX_MINI_RING_ENTRIES;
}
......@@ -1827,17 +1821,14 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
struct rx_desc *rd;
dma_addr_t mapping;
skb = alloc_skb(ACE_JUMBO_BUFSIZE, GFP_ATOMIC);
skb = alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
if (!skb)
break;
/*
* Make sure the IP header ends up on a fresh cache line
*/
skb_reserve(skb, 2 + 16);
skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_JUMBO_BUFSIZE - (2 + 16),
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_jumbo_skbuff[idx].skb = skb;
pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
......@@ -1845,7 +1836,7 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
rd = &ap->rx_jumbo_ring[idx];
set_aceaddr(&rd->addr, mapping);
rd->size = ACE_JUMBO_MTU + ETH_HLEN + 4;
rd->size = ACE_JUMBO_BUFSIZE;
rd->idx = idx;
idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
}
......@@ -2027,19 +2018,19 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
*/
case 0:
rip = &ap->skb->rx_std_skbuff[skbidx];
mapsize = ACE_STD_BUFSIZE - (2 + 16);
mapsize = ACE_STD_BUFSIZE;
rxdesc = &ap->rx_std_ring[skbidx];
std_count++;
break;
case BD_FLG_JUMBO:
rip = &ap->skb->rx_jumbo_skbuff[skbidx];
mapsize = ACE_JUMBO_BUFSIZE - (2 + 16);
mapsize = ACE_JUMBO_BUFSIZE;
rxdesc = &ap->rx_jumbo_ring[skbidx];
atomic_dec(&ap->cur_jumbo_bufs);
break;
case BD_FLG_MINI:
rip = &ap->skb->rx_mini_skbuff[skbidx];
mapsize = ACE_MINI_BUFSIZE - (2 + 16);
mapsize = ACE_MINI_BUFSIZE;
rxdesc = &ap->rx_mini_ring[skbidx];
mini_count++;
break;
......
......@@ -389,7 +389,7 @@ static inline int eql_is_full(slave_queue_t *queue)
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
if (!eql_is_full(queue)) {
slave_t *duplicate_slave = 0;
slave_t *duplicate_slave = NULL;
duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
if (duplicate_slave != 0)
......
......@@ -107,7 +107,7 @@ extern void __get_user_unknown(void);
#define __get_user_check(x,ptr,size,segment) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (__access_ok((unsigned long)__gu_addr,size,segment)) { \
__gu_err = 0; \
......@@ -222,7 +222,7 @@ extern void __put_user_unknown(void);
#define __put_user_check(x,ptr,size,segment) \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (__access_ok((unsigned long)__pu_addr,size,segment)) { \
__pu_err = 0; \
......
......@@ -261,7 +261,7 @@ extern void __put_user_bad(void);
#define __put_user_check(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
might_sleep(); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT); \
......
......@@ -34,7 +34,8 @@
((addr) <= current->thread.fs.seg \
&& ((size) == 0 || (size) - 1 <= current->thread.fs.seg - (addr)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
#define access_ok(type, addr, size) \
(__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size)))
extern inline int verify_area(int type, const void __user * addr, unsigned long size)
{
......@@ -105,6 +106,7 @@ extern long __put_user_bad(void);
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err; \
__chk_user_ptr(ptr); \
__put_user_size((x),(ptr),(size),__pu_err); \
__pu_err; \
})
......@@ -112,7 +114,7 @@ extern long __put_user_bad(void);
#define __put_user_check(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
......@@ -179,6 +181,7 @@ do { \
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err, __gu_val; \
__chk_user_ptr(ptr); \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
......@@ -188,6 +191,7 @@ do { \
({ \
long __gu_err; \
long long __gu_val; \
__chk_user_ptr(ptr); \
__get_user_size64(__gu_val, (ptr), (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
......@@ -196,7 +200,7 @@ do { \
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
......@@ -207,7 +211,7 @@ do { \
({ \
long __gu_err = -EFAULT; \
long long __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size64(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
......
......@@ -175,7 +175,7 @@ do { \
#define __get_user_check(x,ptr,size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ,__gu_addr,size)) \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);\
(x) = (__typeof__(*(ptr)))__gu_val; \
......
......@@ -148,7 +148,7 @@ extern void __put_user_bad(void);
#define __put_user_check(x,ptr,size) \
({ \
int __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
if (likely(access_ok(VERIFY_WRITE,__pu_addr,size))) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
......
......@@ -147,7 +147,7 @@ struct atm_dev_stats {
struct atm_iobuf {
int length;
void *buffer;
void __user *buffer;
};
/* for ATM_GETCIRANGE / ATM_SETCIRANGE */
......
......@@ -117,8 +117,17 @@ enum
struct tc_police
{
__u32 index;
#ifdef CONFIG_NET_CLS_ACT
int refcnt;
int bindcnt;
#endif
/* Turned off because it requires new tc
* to work (for now maintain ABI)
*
#ifdef CONFIG_NET_CLS_ACT
__u32 capab;
#endif
*/
int action;
#define TC_POLICE_UNSPEC TC_ACT_UNSPEC
#define TC_POLICE_OK TC_ACT_OK
......@@ -186,8 +195,12 @@ enum
TCA_U32_DIVISOR,
TCA_U32_SEL,
TCA_U32_POLICE,
#ifdef CONFIG_NET_CLS_ACT
TCA_U32_ACT,
#endif
#ifdef CONFIG_NET_CLS_IND
TCA_U32_INDEV,
#endif
__TCA_U32_MAX
};
......@@ -199,7 +212,9 @@ struct tc_u32_key
__u32 val;
int off;
int offmask;
__u32 kcnt;
#ifdef CONFIG_CLS_U32_PERF
unsigned long kcnt;
#endif
};
struct tc_u32_sel
......@@ -214,9 +229,11 @@ struct tc_u32_sel
short hoff;
__u32 hmask;
struct tc_u32_key keys[0];
#ifdef CONFIG_CLS_U32_PERF
unsigned long rcnt;
unsigned long rhit;
#endif
struct tc_u32_key keys[0];
};
/* Flags */
......@@ -283,8 +300,12 @@ enum
TCA_FW_UNSPEC,
TCA_FW_CLASSID,
TCA_FW_POLICE,
#ifdef CONFIG_NET_CLS_IND
TCA_FW_INDEV,
#endif
#ifdef CONFIG_NET_CLS_ACT
TCA_FW_ACT,
#endif
__TCA_FW_MAX
};
......
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H
#define PSCHED_GETTIMEOFDAY 1
#define PSCHED_JIFFIES 2
#define PSCHED_CPU 3
#define PSCHED_CLOCK_SOURCE PSCHED_JIFFIES
#include <linux/config.h>
#include <linux/netdevice.h>
#include <linux/types.h>
......@@ -16,11 +10,6 @@
#include <linux/module.h>
#include <linux/rtnetlink.h>
#ifdef CONFIG_X86_TSC
#include <asm/msr.h>
#endif
struct rtattr;
struct Qdisc;
......@@ -184,25 +173,19 @@ __cls_set_class(unsigned long *clp, unsigned long cl)
The reason is that, when it is not the same thing as
gettimeofday, it returns an invalid timestamp, which is
not updated while net_bh is active.
So, use PSCHED_CLOCK_SOURCE = PSCHED_CPU on alpha and pentiums
with rdtsc, and PSCHED_JIFFIES on all other architectures, including [34]86
and pentiums without rdtsc.
You can use PSCHED_GETTIMEOFDAY on other architectures,
which have a fast and precise clock source, but it is too expensive.
*/
/* General note about internal clock.
Any clock source returns time intervals, measured in units
close to 1usec. With source PSCHED_GETTIMEOFDAY it is precisely
close to 1usec. With source CONFIG_NET_SCH_CLK_GETTIMEOFDAY it is precisely
microseconds, otherwise something close but different chosen to minimize
arithmetic cost. The ratio usec/internal units, in the form
numerator/denominator, may be read from /proc/net/psched.
*/
#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
typedef struct timeval psched_time_t;
typedef long psched_tdiff_t;
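To make the numerator/denominator remark concrete, a sketch, assuming the first two /proc/net/psched fields are psched_tick_per_us and psched_us_per_tick as this era's sch_api.c prints them:

/* 1 internal tick = us_per_tick / tick_per_us microseconds */
static inline u64 psched_ticks_to_usec(u64 ticks, u32 tick_per_us,
				       u32 us_per_tick)
{
	return ticks * us_per_tick / tick_per_us;
}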
......@@ -211,14 +194,12 @@ typedef long psched_tdiff_t;
#define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ))
#define PSCHED_JIFFIE2US(delay) ((delay)*(1000000/HZ))
#else /* PSCHED_CLOCK_SOURCE != PSCHED_GETTIMEOFDAY */
#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
typedef u64 psched_time_t;
typedef long psched_tdiff_t;
extern psched_time_t psched_time_base;
#if PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
#ifdef CONFIG_NET_SCH_CLK_JIFFIES
#if HZ < 96
#define PSCHED_JSCALE 14
......@@ -236,47 +217,35 @@ extern psched_time_t psched_time_base;
#define PSCHED_US2JIFFIE(delay) (((delay)+(1<<PSCHED_JSCALE)-1)>>PSCHED_JSCALE)
#define PSCHED_JIFFIE2US(delay) ((delay)<<PSCHED_JSCALE)
#elif PSCHED_CLOCK_SOURCE == PSCHED_CPU
#endif /* CONFIG_NET_SCH_CLK_JIFFIES */
#ifdef CONFIG_NET_SCH_CLK_CPU
#include <asm/timex.h>
extern psched_tdiff_t psched_clock_per_hz;
extern int psched_clock_scale;
extern psched_time_t psched_time_base;
extern cycles_t psched_time_mark;
#define PSCHED_GET_TIME(stamp) \
do { \
cycles_t cur = get_cycles(); \
if (sizeof(cycles_t) == sizeof(u32)) { \
if (cur <= psched_time_mark) \
psched_time_base += 0x100000000ULL; \
psched_time_mark = cur; \
(stamp) = (psched_time_base + cur)>>psched_clock_scale; \
} else { \
(stamp) = cur>>psched_clock_scale; \
} \
} while (0)
#define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz)
#define PSCHED_JIFFIE2US(delay) ((delay)*psched_clock_per_hz)
#ifdef CONFIG_X86_TSC
#define PSCHED_GET_TIME(stamp) \
({ u64 __cur; \
rdtscll(__cur); \
(stamp) = __cur>>psched_clock_scale; \
})
#elif defined (__alpha__)
#endif /* CONFIG_NET_SCH_CLK_CPU */
#define PSCHED_WATCHER u32
#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
extern PSCHED_WATCHER psched_time_mark;
#define PSCHED_GET_TIME(stamp) \
({ u32 __res; \
__asm__ __volatile__ ("rpcc %0" : "=r"(__res)); \
if (__res <= psched_time_mark) psched_time_base += 0x100000000UL; \
psched_time_mark = __res; \
(stamp) = (psched_time_base + __res)>>psched_clock_scale; \
})
#else
#error PSCHED_CLOCK_SOURCE=PSCHED_CPU is not supported on this arch.
#endif /* ARCH */
#endif /* PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES */
#endif /* PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY */
#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
#define PSCHED_TDIFF(tv1, tv2) \
({ \
int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
......@@ -340,7 +309,7 @@ extern int psched_tod_diff(int delta_sec, int bound);
#define PSCHED_AUDIT_TDIFF(t) ({ if ((t) > 2000000) (t) = 2000000; })
#else
#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
......@@ -354,7 +323,7 @@ extern int psched_tod_diff(int delta_sec, int bound);
#define PSCHED_IS_PASTPERFECT(t) ((t) == 0)
#define PSCHED_AUDIT_TDIFF(t)
#endif
#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
struct tcf_police
{
......
......@@ -94,6 +94,9 @@ typedef enum {
SCTP_CMD_REPORT_FWDTSN, /* Report new cumulative TSN Ack. */
SCTP_CMD_PROCESS_FWDTSN, /* Skips were reported, so process further. */
SCTP_CMD_CLEAR_INIT_TAG, /* Clears association peer's inittag. */
SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */
SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
SCTP_CMD_FORCE_PRIM_RETRAN, /* Forces retrans. over primary path. */
SCTP_CMD_LAST
} sctp_verb_t;
......
......@@ -175,6 +175,10 @@ typedef enum {
SCTP_IERROR_BAD_TAG,
SCTP_IERROR_BIG_GAP,
SCTP_IERROR_DUP_TSN,
SCTP_IERROR_HIGH_TSN,
SCTP_IERROR_IGNORE_TSN,
SCTP_IERROR_NO_DATA,
SCTP_IERROR_BAD_STREAM,
} sctp_ierror_t;
......
......@@ -322,6 +322,9 @@ void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep,
const struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_chunk *err_chunk);
int sctp_eat_data(const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands);
/* 3rd level prototypes */
__u32 sctp_generate_tag(const struct sctp_endpoint *);
......
......@@ -1421,15 +1421,9 @@ static int filemap_populate(struct vm_area_struct *vma,
return err;
}
} else {
/*
* If a nonlinear mapping then store the file page offset
* in the pte.
*/
if (pgoff != linear_page_index(vma, addr)) {
err = install_file_pte(mm, vma, addr, pgoff, prot);
if (err)
return err;
}
err = install_file_pte(mm, vma, addr, pgoff, prot);
if (err)
return err;
}
len -= PAGE_SIZE;
......
......@@ -1121,15 +1121,9 @@ static int shmem_populate(struct vm_area_struct *vma,
return err;
}
} else if (nonblock) {
/*
* If a nonlinear mapping then store the file page
* offset in the pte.
*/
if (pgoff != linear_page_index(vma, addr)) {
err = install_file_pte(mm, vma, addr, pgoff, prot);
if (err)
return err;
}
err = install_file_pte(mm, vma, addr, pgoff, prot);
if (err)
return err;
}
len -= PAGE_SIZE;
......
......@@ -73,9 +73,9 @@ static int ah_output(struct sk_buff **pskb)
iph->tos = top_iph->tos;
iph->ttl = top_iph->ttl;
iph->frag_off = top_iph->frag_off;
iph->daddr = top_iph->daddr;
if (top_iph->ihl != 5) {
iph->daddr = top_iph->daddr;
memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
if (err)
......@@ -104,9 +104,10 @@ static int ah_output(struct sk_buff **pskb)
top_iph->tos = iph->tos;
top_iph->ttl = iph->ttl;
top_iph->frag_off = iph->frag_off;
top_iph->daddr = iph->daddr;
if (top_iph->ihl != 5)
if (top_iph->ihl != 5) {
top_iph->daddr = iph->daddr;
memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
}
ip_send_check(top_iph);
......
......@@ -25,6 +25,7 @@
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
......@@ -44,16 +45,17 @@
* First port is set to the default port.
*/
static int ports[IP_VS_APP_MAX_PORTS] = {21, 0};
static int ports_c;
module_param_array(ports, int, ports_c, 0);
/*
* Debug level
*/
#ifdef CONFIG_IP_VS_DEBUG
static int debug=0;
MODULE_PARM(debug, "i");
module_param(debug, int, 0);
#endif
MODULE_PARM(ports, "1-" __MODULE_STRING(IP_VS_APP_MAX_PORTS) "i");
/* Dummy variable */
static int ip_vs_ftp_pasv;
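With module_param_array(), the port list is supplied at load time, for example "modprobe ip_vs_ftp ports=21,8021", and the number of entries actually given lands in ports_c; the old MODULE_PARM form encoded the same array bounds by hand in its "1-<max>i" format string.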
......
......@@ -74,74 +74,45 @@ static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
return 0;
}
static int ipv6_clear_mutable_options(struct sk_buff *skb, u16 *nh_offset, int dir)
static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len)
{
u16 offset = sizeof(struct ipv6hdr);
struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
unsigned int packet_len = skb->tail - skb->nh.raw;
u8 nexthdr = skb->nh.ipv6h->nexthdr;
u8 nextnexthdr = 0;
*nh_offset = ((unsigned char *)&skb->nh.ipv6h->nexthdr) - skb->nh.raw;
while (offset + 1 <= packet_len) {
union {
struct ipv6hdr *iph;
struct ipv6_opt_hdr *opth;
struct ipv6_rt_hdr *rth;
char *raw;
} exthdr = { .iph = iph };
char *end = exthdr.raw + len;
int nexthdr = iph->nexthdr;
exthdr.iph++;
while (exthdr.raw < end) {
switch (nexthdr) {
case NEXTHDR_HOP:
*nh_offset = offset;
offset += ipv6_optlen(exthdr);
if (!zero_out_mutable_opts(exthdr)) {
LIMIT_NETDEBUG(
printk(KERN_WARNING "overrun hopopts\n"));
return 0;
case NEXTHDR_DEST:
if (!zero_out_mutable_opts(exthdr.opth)) {
LIMIT_NETDEBUG(printk(
KERN_WARNING "overrun %sopts\n",
nexthdr == NEXTHDR_HOP ?
"hop" : "dest"));
return -EINVAL;
}
nexthdr = exthdr->nexthdr;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
case NEXTHDR_ROUTING:
*nh_offset = offset;
offset += ipv6_optlen(exthdr);
((struct ipv6_rt_hdr*)exthdr)->segments_left = 0;
nexthdr = exthdr->nexthdr;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
break;
case NEXTHDR_DEST:
*nh_offset = offset;
offset += ipv6_optlen(exthdr);
if (!zero_out_mutable_opts(exthdr)) {
LIMIT_NETDEBUG(
printk(KERN_WARNING "overrun destopt\n"));
return 0;
}
nexthdr = exthdr->nexthdr;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
exthdr.rth->segments_left = 0;
break;
case NEXTHDR_AUTH:
if (dir == XFRM_POLICY_OUT) {
memset(((struct ipv6_auth_hdr*)exthdr)->auth_data, 0,
(((struct ipv6_auth_hdr*)exthdr)->hdrlen - 1) << 2);
}
if (exthdr->nexthdr == NEXTHDR_DEST) {
offset += (((struct ipv6_auth_hdr*)exthdr)->hdrlen + 2) << 2;
exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
nextnexthdr = exthdr->nexthdr;
if (!zero_out_mutable_opts(exthdr)) {
LIMIT_NETDEBUG(
printk(KERN_WARNING "overrun destopt\n"));
return 0;
}
}
return nexthdr;
default :
return nexthdr;
return 0;
}
nexthdr = exthdr.opth->nexthdr;
exthdr.raw += ipv6_optlen(exthdr.opth);
}
return nexthdr;
return 0;
}
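A note for reading the rewritten walk: the union lets one cursor be viewed as any of the three header layouts without repeated casts, and ipv6_optlen() from <net/ipv6.h> turns an extension header's hdrlen field (8-octet units beyond the first 8 bytes) into a byte count:

#define ipv6_optlen(p)	(((p)->hdrlen+1) << 3)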
int ah6_output(struct sk_buff **pskb)
......@@ -153,7 +124,6 @@ int ah6_output(struct sk_buff **pskb)
struct ipv6hdr *iph = NULL;
struct ip_auth_hdr *ah;
struct ah_data *ahp;
u16 nh_offset = 0;
u8 nexthdr;
if ((*pskb)->ip_summed == CHECKSUM_HW) {
......@@ -184,7 +154,11 @@ int ah6_output(struct sk_buff **pskb)
ah = (struct ip_auth_hdr*)((*pskb)->nh.ipv6h+1);
ah->nexthdr = IPPROTO_IPV6;
} else {
hdr_len = (*pskb)->h.raw - (*pskb)->nh.raw;
u8 *prevhdr;
hdr_len = ip6_find_1stfragopt(*pskb, &prevhdr);
nexthdr = *prevhdr;
*prevhdr = IPPROTO_AH;
iph = kmalloc(hdr_len, GFP_ATOMIC);
if (!iph) {
err = -ENOMEM;
......@@ -192,13 +166,12 @@ int ah6_output(struct sk_buff **pskb)
}
memcpy(iph, (*pskb)->data, hdr_len);
(*pskb)->nh.ipv6h = (struct ipv6hdr*)skb_push(*pskb, x->props.header_len);
iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
memcpy((*pskb)->nh.ipv6h, iph, hdr_len);
nexthdr = ipv6_clear_mutable_options(*pskb, &nh_offset, XFRM_POLICY_OUT);
if (nexthdr == 0)
err = ipv6_clear_mutable_options((*pskb)->nh.ipv6h, hdr_len);
if (err)
goto error_free_iph;
(*pskb)->nh.raw[nh_offset] = IPPROTO_AH;
(*pskb)->nh.ipv6h->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
ah = (struct ip_auth_hdr*)((*pskb)->nh.raw+hdr_len);
(*pskb)->h.raw = (unsigned char*) ah;
ah->nexthdr = nexthdr;
......@@ -229,8 +202,6 @@ int ah6_output(struct sk_buff **pskb)
IP6_ECN_clear((*pskb)->nh.ipv6h);
} else {
memcpy((*pskb)->nh.ipv6h, iph, hdr_len);
(*pskb)->nh.raw[nh_offset] = IPPROTO_AH;
(*pskb)->nh.ipv6h->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));
kfree (iph);
}
......@@ -259,7 +230,6 @@ int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_bu
* Before process AH
* [IPv6][Ext1][Ext2][AH][Dest][Payload]
* |<-------------->| hdr_len
* |<------------------------>| cleared_hlen
*
* To erase AH:
* Keeping copy of cleared headers. After AH processing,
......@@ -276,7 +246,6 @@ int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_bu
unsigned char *tmp_hdr = NULL;
u16 hdr_len;
u16 ah_hlen;
u16 cleared_hlen;
u16 nh_offset = 0;
u8 nexthdr = 0;
u8 *prevhdr;
......@@ -291,17 +260,10 @@ int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_bu
goto out;
hdr_len = skb->data - skb->nh.raw;
cleared_hlen = hdr_len;
ah = (struct ipv6_auth_hdr*)skb->data;
ahp = x->data;
nexthdr = ah->nexthdr;
ah_hlen = (ah->hdrlen + 2) << 2;
cleared_hlen += ah_hlen;
if (nexthdr == NEXTHDR_DEST) {
struct ipv6_opt_hdr *dsthdr = (struct ipv6_opt_hdr*)(skb->data + ah_hlen);
cleared_hlen += ipv6_optlen(dsthdr);
}
if (ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_full_len) &&
ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_trunc_len))
......@@ -310,11 +272,12 @@ int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_bu
if (!pskb_may_pull(skb, ah_hlen))
goto out;
tmp_hdr = kmalloc(cleared_hlen, GFP_ATOMIC);
tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC);
if (!tmp_hdr)
goto out;
memcpy(tmp_hdr, skb->nh.raw, cleared_hlen);
ipv6_clear_mutable_options(skb, &nh_offset, XFRM_POLICY_IN);
memcpy(tmp_hdr, skb->nh.raw, hdr_len);
if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len))
goto out;
skb->nh.ipv6h->priority = 0;
skb->nh.ipv6h->flow_lbl[0] = 0;
skb->nh.ipv6h->flow_lbl[1] = 0;
......@@ -338,11 +301,6 @@ int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_bu
skb->nh.raw = skb_pull(skb, ah_hlen);
memcpy(skb->nh.raw, tmp_hdr, hdr_len);
if (nexthdr == NEXTHDR_DEST) {
memcpy(skb->nh.raw + hdr_len,
tmp_hdr + hdr_len + ah_hlen,
cleared_hlen - hdr_len - ah_hlen);
}
prevhdr = (u8*)(skb->nh.raw + nh_offset);
*prevhdr = nexthdr;
skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
......
#
# Traffic control configuration.
#
choice
prompt "Packet scheduler clock source"
depends on NET_SCHED
default NET_SCH_CLK_JIFFIES
help
Packet schedulers need a monotonic clock that increments at a static
rate. The kernel provides several suitable interfaces, each with
different properties:
- high resolution (us or better)
- fast to read (minimal locking, no i/o access)
- synchronized on all processors
- handles cpu clock frequency changes
but nothing provides all of the above.
config NET_SCH_CLK_JIFFIES
bool "Timer interrupt"
help
Say Y here if you want to use the timer interrupt (jiffies) as clock
source. This clock source is fast, synchronized on all processors and
handles cpu clock frequency changes, but its resolution is too low
for accurate shaping except at very low speed.
config NET_SCH_CLK_GETTIMEOFDAY
bool "gettimeofday"
help
Say Y here if you want to use gettimeofday as clock source. This clock
source has high resolution, is synchronized on all processors and
handles cpu clock frequency changes, but it is slow.
Choose this if you need a high resolution clock source but can't use
the CPU's cycle counter.
config NET_SCH_CLK_CPU
bool "CPU cycle counter"
depends on X86_TSC || X86_64 || SPARC64 || PPC64 || IA64
help
Say Y here if you want to use the CPU's cycle counter as clock source.
This is a cheap and high resolution clock source, but on some
architectures it is not synchronized on all processors and doesn't
handle cpu clock frequency changes.
The usable cycle counters are:
x86/x86_64 - Timestamp Counter
sparc64 - %ticks register
ppc64 - Time base
ia64 - Interval Time Counter
Choose this if your CPU's cycle counter is working properly.
endchoice
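# A worked example of the resolution trade-off described above (not
# from the original help text): the timer-interrupt source ticks once
# per jiffy, i.e. every 10 ms at HZ=100 or 1 ms at HZ=1000.  At
# 100 Mbit/s a 1 ms timing error already corresponds to ~12.5 KB of
# data, which is why jiffies only suffice for shaping at low rates.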
config NET_SCH_CBQ
tristate "CBQ packet scheduler"
depends on NET_SCHED
......
......@@ -1088,7 +1088,7 @@ static struct file_operations psched_fops = {
};
#endif
#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
int psched_tod_diff(int delta_sec, int bound)
{
int delta;
......@@ -1103,42 +1103,34 @@ int psched_tod_diff(int delta_sec, int bound)
EXPORT_SYMBOL(psched_tod_diff);
#endif
psched_time_t psched_time_base;
#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
#ifdef CONFIG_NET_SCH_CLK_CPU
psched_tdiff_t psched_clock_per_hz;
int psched_clock_scale;
EXPORT_SYMBOL(psched_clock_per_hz);
EXPORT_SYMBOL(psched_clock_scale);
#endif
#ifdef PSCHED_WATCHER
PSCHED_WATCHER psched_time_mark;
psched_time_t psched_time_base;
cycles_t psched_time_mark;
EXPORT_SYMBOL(psched_time_mark);
EXPORT_SYMBOL(psched_time_base);
/*
* Periodically adjust psched_time_base to avoid overflow
* with 32-bit get_cycles(). Safe up to 4GHz CPU.
*/
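/* Worked example behind the bound above, assuming get_cycles() counts
 * CPU cycles: a 32-bit counter at 4 GHz wraps every 2^32 / 4e9 ~= 1.07
 * seconds, so the 1 HZ resample in psched_tick() below observes every
 * wrap; above 4 GHz a wrap could slip between two ticks. */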
static void psched_tick(unsigned long);
static struct timer_list psched_timer = TIMER_INITIALIZER(psched_tick, 0, 0);
static void psched_tick(unsigned long dummy)
{
#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
psched_time_t dummy_stamp;
PSCHED_GET_TIME(dummy_stamp);
/* It is OK up to 4GHz cpu */
psched_timer.expires = jiffies + 1*HZ;
#else
unsigned long now = jiffies;
psched_time_base += ((u64)(now-psched_time_mark))<<PSCHED_JSCALE;
psched_time_mark = now;
psched_timer.expires = now + 60*60*HZ;
#endif
add_timer(&psched_timer);
if (sizeof(cycles_t) == sizeof(u32)) {
psched_time_t dummy_stamp;
PSCHED_GET_TIME(dummy_stamp);
psched_timer.expires = jiffies + 1*HZ;
add_timer(&psched_timer);
}
}
#endif
#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
int __init psched_calibrate_clock(void)
{
psched_time_t stamp, stamp1;
......@@ -1147,9 +1139,7 @@ int __init psched_calibrate_clock(void)
long rdelay;
unsigned long stop;
#ifdef PSCHED_WATCHER
psched_tick(0);
#endif
stop = jiffies + HZ/10;
PSCHED_GET_TIME(stamp);
do_gettimeofday(&tv);
......@@ -1179,15 +1169,12 @@ static int __init pktsched_init(void)
{
struct rtnetlink_link *link_p;
#if PSCHED_CLOCK_SOURCE == PSCHED_CPU
#ifdef CONFIG_NET_SCH_CLK_CPU
if (psched_calibrate_clock() < 0)
return -1;
#elif PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
#elif defined(CONFIG_NET_SCH_CLK_JIFFIES)
psched_tick_per_us = HZ<<PSCHED_JSCALE;
psched_us_per_tick = 1000000;
#ifdef PSCHED_WATCHER
psched_tick(0);
#endif
#endif
link_p = rtnetlink_links[PF_UNSPEC];
......
......@@ -193,7 +193,7 @@ struct hfsc_sched
/*
* macros
*/
#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
#include <linux/time.h>
#undef PSCHED_GET_TIME
#define PSCHED_GET_TIME(stamp) \
......@@ -429,10 +429,10 @@ actlist_get_minvt(struct hfsc_class *cl, u64 cur_time)
* ism: (psched_us/byte) << ISM_SHIFT
* dx: psched_us
*
* Time source resolution
* PSCHED_JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
* PSCHED_CPU: resolution is between 0.5us and 1us.
* PSCHED_GETTIMEOFDAY: resolution is exactly 1us.
* Clock source resolution (CONFIG_NET_SCH_CLK_*)
* JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
* CPU: resolution is between 0.5us and 1us.
* GETTIMEOFDAY: resolution is exactly 1us.
*
* sm and ism are scaled in order to keep effective digits.
* SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
......
......@@ -856,8 +856,13 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
if (net_ratelimit())
printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
cl->classid, diff,
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
q->now.tv_sec * 1000000ULL + q->now.tv_usec,
cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
#else
(unsigned long long) q->now,
(unsigned long long) cl->t_c,
#endif
q->jiffies);
diff = 1000;
}
......@@ -927,8 +932,13 @@ static long htb_do_events(struct htb_sched *q,int level)
if (net_ratelimit())
printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
cl->classid, diff,
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
q->now.tv_sec * 1000000ULL + q->now.tv_usec,
cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
#else
(unsigned long long) q->now,
(unsigned long long) cl->t_c,
#endif
q->jiffies);
diff = 1000;
}
......
......@@ -1093,6 +1093,7 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
case SCTP_STATE_ESTABLISHED:
case SCTP_STATE_SHUTDOWN_PENDING:
case SCTP_STATE_SHUTDOWN_RECEIVED:
case SCTP_STATE_SHUTDOWN_SENT:
if ((asoc->rwnd > asoc->a_rwnd) &&
((asoc->rwnd - asoc->a_rwnd) >=
min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pmtu)))
......
......@@ -525,10 +525,10 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
int rtx_timeout, int *start_timer)
{
struct list_head *lqueue;
struct list_head *lchunk;
struct list_head *lchunk, *lchunk1;
struct sctp_transport *transport = pkt->transport;
sctp_xmit_t status;
struct sctp_chunk *chunk;
struct sctp_chunk *chunk, *chunk1;
struct sctp_association *asoc;
int error = 0;
......@@ -615,6 +615,12 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
* the transmitted list.
*/
list_add_tail(lchunk, &transport->transmitted);
/* Mark the chunk as ineligible for fast retransmit
* after it is retransmitted.
*/
chunk->fast_retransmit = 0;
*start_timer = 1;
q->empty = 0;
......@@ -622,6 +628,18 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
lchunk = sctp_list_dequeue(lqueue);
break;
};
/* If we are here due to a retransmit timeout or a fast
* retransmit and if there are any chunks left in the retransmit
* queue that could not fit in the PMTU sized packet, they need
* to be marked as ineligible for a subsequent fast retransmit.
*/
if (rtx_timeout && !lchunk) {
list_for_each(lchunk1, lqueue) {
chunk1 = list_entry(lchunk1, struct sctp_chunk,
transmitted_list);
chunk1->fast_retransmit = 0;
}
}
}
return error;
......
......@@ -1846,9 +1846,8 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
goto clean_up;
spin_lock_bh(&sctp_assocs_id_lock);
error = idr_get_new(&sctp_assocs_id,
(void *)asoc,
&assoc_id);
error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, 1,
&assoc_id);
spin_unlock_bh(&sctp_assocs_id_lock);
if (error == -EAGAIN)
goto retry;
......
......@@ -529,6 +529,23 @@ static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
}
}
/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc)
{
struct sctp_transport *t;
struct list_head *pos;
list_for_each(pos, &asoc->peer.transport_addr_list) {
t = list_entry(pos, struct sctp_transport, transports);
if (timer_pending(&t->T3_rtx_timer) &&
del_timer(&t->T3_rtx_timer)) {
sctp_transport_put(t);
}
}
}
/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
......@@ -749,6 +766,26 @@ static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
return;
}
/* Helper function to remove the association non-primary peer
* transports.
*/
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
struct sctp_transport *t;
struct list_head *pos;
struct list_head *temp;
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
t = list_entry(pos, struct sctp_transport, transports);
if (!sctp_cmp_addr_exact(&t->ipaddr,
&asoc->peer.primary_addr)) {
sctp_assoc_del_peer(asoc, &t->ipaddr);
}
}
return;
}
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
......@@ -1048,6 +1085,27 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
if (cmd->obj.ptr)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(cmd->obj.ptr));
/* FIXME - Eventually come up with a cleaner way to
* enable COOKIE-ECHO + DATA bundling during
* multihoming stale cookie scenarios; the following
* command plays with asoc->peer.retran_path to
* avoid the problem of sending the COOKIE-ECHO and
* DATA in different paths, which could result
* in the association being ABORTed if the DATA chunk
* is processed first by the server. Checking the
* init error counter simply causes this command
* to be executed only during failed attempts of
* association establishment.
*/
if ((asoc->peer.retran_path !=
asoc->peer.primary_path) &&
(asoc->counters[SCTP_COUNTER_INIT_ERROR] > 0)) {
sctp_add_cmd_sf(commands,
SCTP_CMD_FORCE_PRIM_RETRAN,
SCTP_NULL());
}
break;
case SCTP_CMD_GEN_SHUTDOWN:
......@@ -1282,6 +1340,19 @@ int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_CMD_CLEAR_INIT_TAG:
asoc->peer.i.init_tag = 0;
break;
case SCTP_CMD_DEL_NON_PRIMARY:
sctp_cmd_del_non_primary(asoc);
break;
case SCTP_CMD_T3_RTX_TIMERS_STOP:
sctp_cmd_t3_rtx_timers_stop(commands, asoc);
break;
case SCTP_CMD_FORCE_PRIM_RETRAN:
t = asoc->peer.retran_path;
asoc->peer.retran_path = asoc->peer.primary_path;
error = sctp_outq_uncork(&asoc->outqueue);
local_cork = 0;
asoc->peer.retran_path = t;
break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
......
......@@ -1697,6 +1697,32 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
/*
* API 7. Socket Options (setting the default value for the endpoint)
* All options that support specific settings on an association by
* filling in either an association id variable or a sockaddr_storage
* SHOULD also support setting of the same value for the entire endpoint
* (i.e. future associations). To accomplish this the following logic is
* used when setting one of these options:
* c) If neither the sockaddr_storage nor the association identification
* is set, i.e. the sockaddr_storage is set to all 0's (INADDR_ANY) and
* the association identification is 0, the settings are a default
* and are to be applied to the endpoint (all future associations).
*/
/* update default value for endpoint (all future associations) */
if (!params.spp_assoc_id &&
sctp_is_any(( union sctp_addr *)&params.spp_address)) {
if (params.spp_hbinterval)
sctp_sk(sk)->paddrparam.spp_hbinterval =
params.spp_hbinterval;
if (params.spp_pathmaxrxt)
sctp_sk(sk)->paddrparam.spp_pathmaxrxt =
params.spp_pathmaxrxt;
return 0;
}
trans = sctp_addr_id2transport(sk, &params.spp_address,
params.spp_assoc_id);
if (!trans)
......@@ -2864,6 +2890,17 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
if (copy_from_user(&params, optval, len))
return -EFAULT;
/* If no association id is specified, retrieve the default value
* for the endpoint that will be used for all future associations.
*/
if (!params.spp_assoc_id &&
sctp_is_any(( union sctp_addr *)&params.spp_address)) {
params.spp_hbinterval = sctp_sk(sk)->paddrparam.spp_hbinterval;
params.spp_pathmaxrxt = sctp_sk(sk)->paddrparam.spp_pathmaxrxt;
goto done;
}
trans = sctp_addr_id2transport(sk, &params.spp_address,
params.spp_assoc_id);
if (!trans)
......@@ -2883,6 +2920,7 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
*/
params.spp_pathmaxrxt = trans->error_threshold;
done:
if (copy_to_user(optval, &params, len))
return -EFAULT;
......
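A minimal userspace sketch of the endpoint-default paths above, assuming the lksctp-tools definitions of struct sctp_paddrparams and SCTP_PEER_ADDR_PARAMS (the interval and retransmission values are illustrative):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* lksctp-tools; layout assumed */

/* A zeroed address plus spp_assoc_id == 0 selects the endpoint itself,
 * so these values become the defaults for all future associations. */
static int set_endpoint_defaults(int sd)
{
	struct sctp_paddrparams p;

	memset(&p, 0, sizeof(p));	/* INADDR_ANY, spp_assoc_id = 0 */
	p.spp_hbinterval = 5000;	/* heartbeat interval, ms */
	p.spp_pathmaxrxt = 5;		/* per-path max retransmissions */
	return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
			  &p, sizeof(p));
}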
......@@ -204,6 +204,7 @@ static void xfrm_policy_timer(unsigned long data)
return;
expired:
read_unlock(&xp->lock);
km_policy_expired(xp, dir, 1);
xfrm_policy_delete(xp, dir);
xfrm_pol_put(xp);
......