Commit 06dbbfef authored by Linus Torvalds

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  [IPV4]: Explicitly call fib_get_table() in fib_frontend.c
  [NET]: Use BUILD_BUG_ON in net/core/flowi.c
  [NET]: Remove in-code externs for some functions from net/core/dev.c
  [NET]: Don't declare extern variables in net/core/sysctl_net_core.c
  [TCP]: Remove unneeded implicit type cast when calling tcp_minshall_update()
  [NET]: Treat the sign of the result of skb_headroom() consistently
  [9P]: Fix missing unlock before return in p9_mux_poll_start
  [PKT_SCHED]: Fix sch_prio.c build with CONFIG_NETDEVICES_MULTIQUEUE
  [IPV4] ip_gre: sendto/recvfrom NBMA address
  [SCTP]: Consolidate sctp_ulpq_renege_xxx functions
  [NETLINK]: Fix ACK processing after netlink_dump_start
  [VLAN]: MAINTAINERS update
  [DCCP]: Implement SIOCINQ/FIONREAD
  [NET]: Validate device addr prior to interface-up
parents 22fa8d59 03cf786c
@@ -4148,6 +4148,12 @@ W: http://linuxtv.org
 T: git kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb.git
 S: Maintained
 
+VLAN (802.1Q)
+P: Patrick McHardy
+M: kaber@trash.net
+L: netdev@vger.kernel.org
+S: Maintained
+
 VT1211 HARDWARE MONITOR DRIVER
 P: Juerg Haefliger
 M: juergh@gmail.com
...
@@ -79,12 +79,10 @@ static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen
  */
 
 /*
- * These are checked at init time to see if they are at least 256KB
- * and increased to 256KB if they are not. This is done to avoid ending
- * up with socket buffers smaller than the MTU size,
+ * sysctl_[wr]mem_max are checked at init time to see if they are at
+ * least 256KB and increased to 256KB if they are not. This is done to
+ * avoid ending up with socket buffers smaller than the MTU size,
  */
-extern __u32 sysctl_wmem_max;
-extern __u32 sysctl_rmem_max;
 
 static int __devinit rr_init_one(struct pci_dev *pdev,
 				 const struct pci_device_id *ent)
...
@@ -669,6 +669,8 @@ struct net_device
 #define HAVE_SET_MAC_ADDR
 	int			(*set_mac_address)(struct net_device *dev,
 						   void *addr);
+#define HAVE_VALIDATE_ADDR
+	int			(*validate_addr)(struct net_device *dev);
 #define HAVE_PRIVATE_IOCTL
 	int			(*do_ioctl)(struct net_device *dev,
 					    struct ifreq *ifr, int cmd);
...
@@ -994,7 +994,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
  *
  *	Return the number of bytes of free space at the head of an &sk_buff.
  */
-static inline int skb_headroom(const struct sk_buff *skb)
+static inline unsigned int skb_headroom(const struct sk_buff *skb)
 {
 	return skb->data - skb->head;
 }
@@ -1347,7 +1347,7 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
  *	Returns true if modifying the header part of the cloned buffer
  *	does not requires the data to be copied.
  */
-static inline int skb_clone_writable(struct sk_buff *skb, int len)
+static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
 {
 	return !skb_header_cloned(skb) &&
 	       skb_headroom(skb) + len <= skb->hdr_len;
...
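The two signature changes above are about C's usual arithmetic conversions: skb_headroom() now returns unsigned int, so callers that compare it against signed values must treat the sign consistently. A minimal userspace sketch of the pitfall the series guards against (variable names are illustrative, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int headroom = 16;	/* skb_headroom() now returns unsigned */
	int needed = -32;		/* a miscomputed, negative header size */

	/*
	 * In a mixed comparison the signed operand is converted to
	 * unsigned, so -32 becomes a huge positive value and a branch
	 * the programmer never intended gets taken.
	 */
	if (needed > headroom)		/* true: -32 promoted to 4294967264 */
		printf("mixed compare taken: -32 became %u\n",
		       (unsigned int)needed);
	if (needed > (int)headroom)	/* false: plain signed compare */
		printf("signed compare taken\n");
	return 0;
}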
@@ -803,7 +803,7 @@ static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 	return left <= tcp_max_burst(tp);
 }
 
-static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
+static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
 				       const struct sk_buff *skb)
 {
 	if (skb->len < mss)
...
@@ -37,6 +37,8 @@
 extern struct sock *xfrm_nl;
 extern u32 sysctl_xfrm_aevent_etime;
 extern u32 sysctl_xfrm_aevent_rseqth;
+extern int sysctl_xfrm_larval_drop;
+extern u32 sysctl_xfrm_acq_expires;
 extern struct mutex xfrm_cfg_mutex;
...
@@ -222,8 +222,10 @@ static int p9_mux_poll_start(struct p9_conn *m)
 	}
 
 	if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) {
-		if (vptlast == NULL)
+		if (vptlast == NULL) {
+			mutex_unlock(&p9_mux_task_lock);
 			return -ENOMEM;
+		}
 
 		P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
 		list_add(&m->mux_list, &vptlast->mux_list);
...
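The 9P fix above adds the mutex_unlock() that was missing on the early -ENOMEM return. The usual kernel idiom that avoids this class of bug is to funnel every exit through a single unlock label; a small pthreads sketch of the pattern (names are illustrative, not from the patch):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;

static int poll_start(int have_task)
{
	int err = 0;

	pthread_mutex_lock(&task_lock);
	if (!have_task) {
		err = -ENOMEM;	/* early failure still reaches the unlock */
		goto out;
	}
	/* ... register the connection with a polling task ... */
out:
	pthread_mutex_unlock(&task_lock);
	return err;
}

int main(void)
{
	printf("%d %d\n", poll_start(1), poll_start(0));	/* 0 -12 */
	return 0;
}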
@@ -120,6 +120,8 @@
 #include <linux/ctype.h>
 #include <linux/if_arp.h>
 
+#include "net-sysfs.h"
+
 /*
  *	The list of packet types we will receive (as opposed to discard)
  *	and the routines to invoke.
@@ -249,10 +251,6 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
 
 DEFINE_PER_CPU(struct softnet_data, softnet_data);
 
-extern int netdev_kobject_init(void);
-extern int netdev_register_kobject(struct net_device *);
-extern void netdev_unregister_kobject(struct net_device *);
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * register_netdevice() inits dev->_xmit_lock and sets lockdep class
@@ -1007,17 +1005,20 @@ int dev_open(struct net_device *dev)
 	 *	Call device private open method
 	 */
 	set_bit(__LINK_STATE_START, &dev->state);
-	if (dev->open) {
+
+	if (dev->validate_addr)
+		ret = dev->validate_addr(dev);
+
+	if (!ret && dev->open)
 		ret = dev->open(dev);
-		if (ret)
-			clear_bit(__LINK_STATE_START, &dev->state);
-	}
 
 	/*
 	 *	If it went open OK then:
 	 */
 
-	if (!ret) {
+	if (ret)
+		clear_bit(__LINK_STATE_START, &dev->state);
+	else {
 		/*
 		 *	Set the flags.
 		 */
@@ -1038,6 +1039,7 @@ int dev_open(struct net_device *dev)
 		 */
 		call_netdevice_notifiers(NETDEV_UP, dev);
 	}
+
 	return ret;
 }
...
@@ -142,8 +142,6 @@ typedef u64 flow_compare_t;
 typedef u32 flow_compare_t;
 #endif
 
-extern void flowi_is_missized(void);
-
 /* I hear what you're saying, use memcmp.  But memcmp cannot make
  * important assumptions that we can here, such as alignment and
  * constant size.
@@ -153,8 +151,7 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
 	flow_compare_t *k1, *k1_lim, *k2;
 	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
 
-	if (sizeof(struct flowi) % sizeof(flow_compare_t))
-		flowi_is_missized();
+	BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
 	k1 = (flow_compare_t *) key1;
 	k1_lim = k1 + n_elem;
...
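The flow.c change replaces a link-time trick (calling the deliberately undefined flowi_is_missized(), which only fails at link time) with BUILD_BUG_ON, which fails during compilation. A standalone sketch of how such a macro can be built; the kernel's real definition lives in <linux/kernel.h>, and this stand-in uses the negative-size-array trick:

#include <stdio.h>

/*
 * Evaluates to a compile error when condition is nonzero: the array
 * would have size -1. Costs nothing when the condition is false.
 */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))

struct flowi_demo {		/* hypothetical flow key, 12 bytes */
	unsigned int src, dst, ports;
};
typedef unsigned int flow_compare_t;

int main(void)
{
	/* Compiles only if the key divides evenly into compare words. */
	BUILD_BUG_ON(sizeof(struct flowi_demo) % sizeof(flow_compare_t));
	printf("struct flowi_demo is word-comparable\n");
	return 0;
}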
@@ -18,6 +18,8 @@
 #include <linux/wireless.h>
 #include <net/iw_handler.h>
 
+#include "net-sysfs.h"
+
 #ifdef CONFIG_SYSFS
 static const char fmt_hex[] = "%#x\n";
 static const char fmt_long_hex[] = "%#lx\n";
...
+#ifndef __NET_SYSFS_H__
+#define __NET_SYSFS_H__
+
+int netdev_kobject_init(void);
+int netdev_register_kobject(struct net_device *);
+void netdev_unregister_kobject(struct net_device *);
+
+#endif
@@ -9,25 +9,12 @@
 #include <linux/sysctl.h>
 #include <linux/module.h>
 #include <linux/socket.h>
+#include <linux/netdevice.h>
 #include <net/sock.h>
+#include <net/xfrm.h>
 
 #ifdef CONFIG_SYSCTL
 
-extern int netdev_max_backlog;
-extern int weight_p;
-
-extern __u32 sysctl_wmem_max;
-extern __u32 sysctl_rmem_max;
-
-extern int sysctl_core_destroy_delay;
-
-#ifdef CONFIG_XFRM
-extern u32 sysctl_xfrm_aevent_etime;
-extern u32 sysctl_xfrm_aevent_rseqth;
-extern int sysctl_xfrm_larval_drop;
-extern u32 sysctl_xfrm_acq_expires;
-#endif
-
 ctl_table core_table[] = {
 #ifdef CONFIG_NET
 	{
...
@@ -26,6 +26,7 @@
 #include <net/sock.h>
 #include <net/xfrm.h>
 
+#include <asm/ioctls.h>
 #include <asm/semaphore.h>
 #include <linux/spinlock.h>
 #include <linux/timer.h>
@@ -378,8 +379,36 @@ EXPORT_SYMBOL_GPL(dccp_poll);
 
 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
-	dccp_pr_debug("entry\n");
-	return -ENOIOCTLCMD;
+	int rc = -ENOTCONN;
+
+	lock_sock(sk);
+
+	if (sk->sk_state == DCCP_LISTEN)
+		goto out;
+
+	switch (cmd) {
+	case SIOCINQ: {
+		struct sk_buff *skb;
+		unsigned long amount = 0;
+
+		skb = skb_peek(&sk->sk_receive_queue);
+		if (skb != NULL) {
+			/*
+			 * We will only return the amount of this packet since
+			 * that is all that will be read.
+			 */
+			amount = skb->len;
+		}
+		rc = put_user(amount, (int __user *)arg);
+	}
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+out:
+	release_sock(sk);
+	return rc;
 }
 
 EXPORT_SYMBOL_GPL(dccp_ioctl);
...
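With the hunk above, DCCP sockets gain the same "how many bytes would the next read return" query that TCP and UDP already answer. A hedged userspace sketch of using it; the protocol constants are the standard Linux values, and any address setup is elided:

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP 6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP 33
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
	if (fd < 0) {
		perror("socket");	/* kernel without DCCP support */
		return 1;
	}
	/* ... connect(fd, ...) and wait for data in a real program ... */

	int pending = 0;
	if (ioctl(fd, FIONREAD, &pending) == 0)	/* SIOCINQ == FIONREAD */
		printf("next read returns %d bytes\n", pending);
	else
		perror("ioctl");	/* listening sockets get ENOTCONN */
	close(fd);
	return 0;
}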
@@ -298,6 +298,14 @@ static int eth_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
+static int eth_validate_addr(struct net_device *dev)
+{
+	if (!is_valid_ether_addr(dev->dev_addr))
+		return -EINVAL;
+
+	return 0;
+}
+
 const struct header_ops eth_header_ops ____cacheline_aligned = {
 	.create		= eth_header,
 	.parse		= eth_header_parse,
@@ -317,6 +325,7 @@ void ether_setup(struct net_device *dev)
 	dev->change_mtu		= eth_change_mtu;
 	dev->set_mac_address	= eth_mac_addr;
+	dev->validate_addr	= eth_validate_addr;
 	dev->type		= ARPHRD_ETHER;
 	dev->hard_header_len	= ETH_HLEN;
...
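Together with the dev_open() change earlier in this merge, this means an Ethernet device whose MAC is all-zero or multicast is refused before the driver's open routine ever runs. A userspace restatement of the check eth_validate_addr() applies, mirroring is_valid_ether_addr(); this is an illustration, not the kernel source:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_zero_ether(const unsigned char *a)
{
	static const unsigned char zero[6];
	return memcmp(a, zero, 6) == 0;
}

static bool is_valid_ether(const unsigned char *a)
{
	/* not a multicast address (LSB of first octet) and not all-zero */
	return !(a[0] & 1) && !is_zero_ether(a);
}

int main(void)
{
	unsigned char good[6] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };
	unsigned char bad[6]  = { 0 };

	printf("%d %d\n", is_valid_ether(good), is_valid_ether(bad)); /* 1 0 */
	return 0;
}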
@@ -128,13 +128,14 @@ struct net_device * ip_dev_find(__be32 addr)
 	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
 	struct fib_result res;
 	struct net_device *dev = NULL;
+	struct fib_table *local_table;
 
 #ifdef CONFIG_IP_MULTIPLE_TABLES
 	res.r = NULL;
 #endif
 
-	if (!ip_fib_local_table ||
-	    ip_fib_local_table->tb_lookup(ip_fib_local_table, &fl, &res))
+	local_table = fib_get_table(RT_TABLE_LOCAL);
+	if (!local_table || local_table->tb_lookup(local_table, &fl, &res))
 		return NULL;
 	if (res.type != RTN_LOCAL)
 		goto out;
@@ -152,6 +153,7 @@ unsigned inet_addr_type(__be32 addr)
 	struct flowi		fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
 	struct fib_result	res;
 	unsigned ret = RTN_BROADCAST;
+	struct fib_table *local_table;
 
 	if (ZERONET(addr) || BADCLASS(addr))
 		return RTN_BROADCAST;
@@ -162,10 +164,10 @@ unsigned inet_addr_type(__be32 addr)
 	res.r = NULL;
 #endif
 
-	if (ip_fib_local_table) {
+	local_table = fib_get_table(RT_TABLE_LOCAL);
+	if (local_table) {
 		ret = RTN_UNICAST;
-		if (!ip_fib_local_table->tb_lookup(ip_fib_local_table,
-						   &fl, &res)) {
+		if (!local_table->tb_lookup(local_table, &fl, &res)) {
 			ret = res.type;
 			fib_res_put(&res);
 		}
...
@@ -674,7 +674,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct rtable *rt;			/* Route to the other host */
 	struct net_device *tdev;		/* Device to other host */
 	struct iphdr  *iph;			/* Our new IP header */
-	int    max_headroom;			/* The extra header space needed */
+	unsigned int max_headroom;		/* The extra header space needed */
 	int    gre_hlen;
 	__be32 dst;
 	int    mtu;
@@ -1033,7 +1033,6 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
-#ifdef CONFIG_NET_IPGRE_BROADCAST
 /* Nice toy. Unfortunately, useless in real life :-)
    It allows to construct virtual multiprotocol broadcast "LAN"
    over the Internet, provided multicast routing is tuned.
@@ -1092,10 +1091,19 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 	return -t->hlen;
 }
 
+static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+	struct iphdr *iph = (struct iphdr *) skb_mac_header(skb);
+	memcpy(haddr, &iph->saddr, 4);
+	return 4;
+}
+
 static const struct header_ops ipgre_header_ops = {
 	.create	= ipgre_header,
+	.parse	= ipgre_header_parse,
 };
 
+#ifdef CONFIG_NET_IPGRE_BROADCAST
 static int ipgre_open(struct net_device *dev)
 {
 	struct ip_tunnel *t = netdev_priv(dev);
@@ -1197,6 +1205,8 @@ static int ipgre_tunnel_init(struct net_device *dev)
 			dev->stop = ipgre_close;
 		}
 #endif
+	} else {
+		dev->header_ops = &ipgre_header_ops;
 	}
 
 	if (!tdev && tunnel->parms.link)
...
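Roughly what this enables: because ipgre_header_parse() now reports the outer IPv4 source as the device's link-layer address, a packet socket bound to a GRE device can observe (and, with sendto(), select) the NBMA tunnel endpoint per packet. A hedged userspace sketch; the device name gre1 and the surrounding tunnel setup are assumptions:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <netpacket/packet.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	struct sockaddr_ll sll;
	memset(&sll, 0, sizeof(sll));
	sll.sll_family   = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_IP);
	sll.sll_ifindex  = if_nametoindex("gre1");	/* hypothetical device */
	if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
		perror("bind");
		return 1;
	}

	char buf[2048];
	struct sockaddr_ll from;
	socklen_t flen = sizeof(from);
	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
			     (struct sockaddr *)&from, &flen);
	if (n >= 0 && from.sll_halen == 4) {	/* 4-byte NBMA endpoint */
		struct in_addr peer;
		memcpy(&peer, from.sll_addr, 4);
		printf("%zd bytes from tunnel endpoint %s\n",
		       n, inet_ntoa(peer));
	}
	close(fd);
	return 0;
}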
@@ -161,7 +161,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 	struct dst_entry *dst = skb->dst;
 	struct rtable *rt = (struct rtable *)dst;
 	struct net_device *dev = dst->dev;
-	int hh_len = LL_RESERVED_SPACE(dev);
+	unsigned int hh_len = LL_RESERVED_SPACE(dev);
 
 	if (rt->rt_type == RTN_MULTICAST)
 		IP_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
...
@@ -515,7 +515,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct net_device *tdev;		/* Device to other host */
 	struct iphdr  *old_iph = ip_hdr(skb);
 	struct iphdr  *iph;			/* Our new IP header */
-	int    max_headroom;			/* The extra header space needed */
+	unsigned int max_headroom;		/* The extra header space needed */
 	__be32 dst = tiph->daddr;
 	int    mtu;
...
@@ -325,7 +325,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	__be16 df = old_iph->frag_off;
 	sk_buff_data_t old_transport_header = skb->transport_header;
 	struct iphdr  *iph;			/* Our new IP header */
-	int    max_headroom;			/* The extra header space needed */
+	unsigned int max_headroom;		/* The extra header space needed */
 	int    mtu;
 
 	EnterFunction(10);
...
@@ -3909,7 +3909,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 	while (before(start, end)) {
 		struct sk_buff *nskb;
-		int header = skb_headroom(skb);
+		unsigned int header = skb_headroom(skb);
 		int copy = SKB_MAX_ORDER(header, 0);
 
 		/* Too big header? This can happen with IPv6. */
...
@@ -171,7 +171,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
 	u32 mtu;
 
 	if (opt) {
-		int head_room;
+		unsigned int head_room;
 
 		/* First: exthdrs may take lots of space (~8K for now)
 		   MAX_HEADER is not enough.
...
@@ -838,7 +838,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	struct dst_entry *dst;
 	struct net_device *tdev;
 	int mtu;
-	int max_headroom = sizeof(struct ipv6hdr);
+	unsigned int max_headroom = sizeof(struct ipv6hdr);
 	u8 proto;
 	int err = -1;
 	int pkt_len;
...
@@ -430,7 +430,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct rtable *rt;			/* Route to the other host */
 	struct net_device *tdev;		/* Device to other host */
 	struct iphdr  *iph;			/* Our new IP header */
-	int    max_headroom;			/* The extra header space needed */
+	unsigned int max_headroom;		/* The extra header space needed */
 	__be32 dst = tiph->daddr;
 	int    mtu;
 	struct in6_addr *addr6;
...
@@ -1565,7 +1565,11 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 
 	netlink_dump(sk);
 	sock_put(sk);
-	return 0;
+
+	/* We successfully started a dump, by returning -EINTR we
+	 * signal not to send ACK even if it was requested.
+	 */
+	return -EINTR;
 }
 
 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
@@ -1619,17 +1623,21 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
 		/* Only requests are handled by the kernel */
 		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
-			goto skip;
+			goto ack;
 
 		/* Skip control messages */
 		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
-			goto skip;
+			goto ack;
 
 		err = cb(skb, nlh);
-skip:
+		if (err == -EINTR)
+			goto skip;
+
+ack:
 		if (nlh->nlmsg_flags & NLM_F_ACK || err)
 			netlink_ack(skb, nlh, err);
 
+skip:
 		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
 		if (msglen > skb->len)
 			msglen = skb->len;
...
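The ACK fix matters to userspace dumpers: a request that successfully starts a dump is answered by the dump itself, terminated by NLMSG_DONE, and no separate ACK follows even when NLM_F_ACK was set (the -EINTR return is purely an in-kernel signal). A minimal sketch of that flow against rtnetlink, with error handling trimmed for brevity:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg g;
	} req;
	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtgenmsg));
	req.nlh.nlmsg_type = RTM_GETLINK;
	/* NLM_F_ACK is set, yet the started dump suppresses the ACK */
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP | NLM_F_ACK;
	req.g.rtgen_family = AF_UNSPEC;
	send(fd, &req, req.nlh.nlmsg_len, 0);

	for (;;) {
		char buf[8192];
		int len = recv(fd, buf, sizeof(buf), 0);
		if (len <= 0)
			break;
		struct nlmsghdr *nh;
		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len)) {
			if (nh->nlmsg_type == NLMSG_DONE) {
				close(fd);	/* dump finished, no ACK */
				return 0;
			}
			printf("dump msg type %u, len %u\n",
			       nh->nlmsg_type, nh->nlmsg_len);
		}
	}
	close(fd);
	return 0;
}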
@@ -136,7 +136,7 @@ prio_dequeue(struct Qdisc* sch)
 		 * pulling an skb.  This way we avoid excessive requeues
 		 * for slower queues.
 		 */
-		if (!netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
+		if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
 			qdisc = q->queues[prio];
 			skb = qdisc->dequeue(qdisc);
 			if (skb) {
@@ -165,7 +165,7 @@ static struct sk_buff *rr_dequeue(struct Qdisc* sch)
 		 * for slower queues. If the queue is stopped, try the
 		 * next queue.
 		 */
-		if (!netif_subqueue_stopped(sch->dev,
+		if (!__netif_subqueue_stopped(sch->dev,
 					(q->mq ? q->curband : 0))) {
 			qdisc = q->queues[q->curband];
 			skb = qdisc->dequeue(qdisc);
...
@@ -908,8 +908,8 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 		return;
 }
 
-/* Renege 'needed' bytes from the ordering queue. */
-static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
+static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
+		struct sk_buff_head *list, __u16 needed)
 {
 	__u16 freed = 0;
 	__u32 tsn;
@@ -919,7 +919,7 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
-	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
+	while ((skb = __skb_dequeue_tail(list)) != NULL) {
 		freed += skb_headlen(skb);
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
@@ -933,30 +933,16 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
 	return freed;
 }
 
+/* Renege 'needed' bytes from the ordering queue. */
+static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
+{
+	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
+}
+
 /* Renege 'needed' bytes from the reassembly queue. */
 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
 {
-	__u16 freed = 0;
-	__u32 tsn;
-	struct sk_buff *skb;
-	struct sctp_ulpevent *event;
-	struct sctp_tsnmap *tsnmap;
-
-	tsnmap = &ulpq->asoc->peer.tsn_map;
-
-	/* Walk backwards through the list, reneges the newest tsns. */
-	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
-		freed += skb_headlen(skb);
-		event = sctp_skb2event(skb);
-		tsn = event->tsn;
-
-		sctp_ulpevent_free(event);
-		sctp_tsnmap_renege(tsnmap, tsn);
-		if (freed >= needed)
-			return freed;
-	}
-
-	return freed;
+	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
 }
 
 /* Partial deliver the first message as there is pressure on rwnd. */
...