Commit 28135721 authored by David S. Miller

Merge nuts.davemloft.net:/disk1/BK/network-2.6

into nuts.davemloft.net:/disk1/BK/net-2.6
parents 772a49b7 4562a42b
......@@ -2742,6 +2742,23 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
}
#endif /* not Sparc and not PPC */
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
static void gem_netpoll(struct net_device *netdev)
{
struct gem *gp = netdev->priv;
if (!gp->pdev)
return;
disable_irq(gp->pdev->irq);
gem_interrupt(gp->pdev->irq, netdev, NULL);
enable_irq(gp->pdev->irq);
}
#endif
static int __devinit gem_get_device_address(struct gem *gp)
{
#if defined(__sparc__) || defined(CONFIG_PPC_PMAC)
......@@ -2940,6 +2957,9 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
dev->set_multicast_list = gem_set_multicast;
dev->do_ioctl = gem_ioctl;
dev->poll = gem_poll;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = gem_netpoll;
#endif
dev->weight = 64;
dev->ethtool_ops = &gem_ethtool_ops;
dev->tx_timeout = gem_tx_timeout;
......
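The hunk above wires up CONFIG_NET_POLL_CONTROLLER for sungem: clients such as netconsole call the driver's optional poll_controller hook to service the NIC without a live hardware interrupt, and gem_netpoll does so by masking the IRQ line and running gem_interrupt() by hand. Below is a standalone toy of the optional-callback pattern only; every name in it is a stand-in, not kernel code.

#include <stdio.h>

/* Toy mirror of the hook registered in gem_init_one(): the device publishes
 * a poll_controller function pointer and a polling client calls it only if
 * it is present. Names are illustrative only. */
struct toy_netdev {
    void (*poll_controller)(struct toy_netdev *dev);
};

static void toy_gem_netpoll(struct toy_netdev *dev)
{
    /* The real driver masks its IRQ, invokes gem_interrupt() directly,
     * then unmasks the IRQ; here we only print a marker. */
    printf("device polled without a hardware interrupt\n");
}

int main(void)
{
    struct toy_netdev dev = { .poll_controller = toy_gem_netpoll };

    if (dev.poll_controller)        /* the hook is optional, so check first */
        dev.poll_controller(&dev);
    return 0;
}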
......@@ -182,8 +182,7 @@ enum {
as offsets from skb->nh.
*/
struct inet6_skb_parm
{
struct inet6_skb_parm {
int iif;
__u16 ra;
__u16 hop;
......@@ -194,6 +193,14 @@ struct inet6_skb_parm
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
/**
* struct ipv6_pinfo - ipv6 private area
*
* In the struct sock hierarchy (tcp6_sock, udp6_sock, etc)
* this _must_ be the last member, so that inet6_sk_generic
* is able to calculate its offset from the base struct sock
* by using the struct proto->slab_obj_size member. -acme
*/
struct ipv6_pinfo {
struct in6_addr saddr;
struct in6_addr rcv_saddr;
......@@ -282,10 +289,6 @@ static inline struct raw6_opt * raw6_sk(const struct sock *__sk)
return &((struct raw6_sock *)__sk)->raw6;
}
struct ipv6_sk_offset {
int offset;
};
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only)
#define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
......
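The layout rule documented in the new comment is what allows the per-protocol ipv6_sk_offset tables to be deleted in this series: because struct ipv6_pinfo is the last member of every IPv6 protocol socket, inet6_sk_generic() (changed in net/ipv6/af_inet6.c further down) can derive its offset from proto->slab_obj_size alone. A standalone sketch of that arithmetic, using stand-in struct names rather than the kernel definitions:

#include <stdio.h>
#include <stddef.h>

/* Stand-in layouts: the IPv6 private area sits last, exactly as the comment
 * above requires, so its offset is object size minus its own size. */
struct fake_ipv6_pinfo { int addrs[16]; };
struct fake_tcp6_sock  { int tcp_state[64]; struct fake_ipv6_pinfo inet6; };

int main(void)
{
    size_t slab_obj_size = sizeof(struct fake_tcp6_sock);
    size_t derived = slab_obj_size - sizeof(struct fake_ipv6_pinfo);

    printf("derived offset %zu, offsetof() reports %zu\n",
           derived, offsetof(struct fake_tcp6_sock, inet6));
    return 0;
}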
......@@ -37,16 +37,14 @@ static inline struct sock *next_unix_socket(int *i, struct sock *s)
#define forall_unix_sockets(i, s) \
for (s = first_unix_socket(&(i)); s; s = next_unix_socket(&(i),(s)))
struct unix_address
{
struct unix_address {
atomic_t refcnt;
int len;
unsigned hash;
struct sockaddr_un name[0];
};
struct unix_skb_parms
{
struct unix_skb_parms {
struct ucred creds; /* Skb credentials */
struct scm_fp_list *fp; /* Passed files */
};
......@@ -68,6 +66,7 @@ struct unix_sock {
struct dentry *dentry;
struct vfsmount *mnt;
struct semaphore readsem;
struct sock *peer;
struct sock *other;
struct sock *gc_tree;
atomic_t inflight;
......
......@@ -210,22 +210,11 @@ extern int fib_validate_source(u32 src, u32 dst, u8 tos, int oif,
extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res);
/* Exported by fib_semantics.c */
extern int ip_fib_check_default(u32 gw, struct net_device *dev);
extern void fib_release_info(struct fib_info *);
extern int fib_semantic_match(int type, struct fib_info *,
const struct flowi *, struct fib_result*);
extern struct fib_info *fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
const struct nlmsghdr *, int *err);
extern int fib_nh_match(struct rtmsg *r, struct nlmsghdr *, struct kern_rta *rta, struct fib_info *fi);
extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
u8 tb_id, u8 type, u8 scope, void *dst, int dst_len, u8 tos,
struct fib_info *fi);
extern int ip_fib_check_default(u32 gw, struct net_device *dev);
extern int fib_sync_down(u32 local, struct net_device *dev, int force);
extern int fib_sync_up(struct net_device *dev);
extern int fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm,
struct kern_rta *rta, struct rtentry *r);
extern void fib_node_seq_show(struct seq_file *seq, int type, int dead,
struct fib_info *fi, u32 prefix, u32 mask);
extern u32 __fib_res_prefsrc(struct fib_result *res);
/* Exported by fib_hash.c */
......
......@@ -142,7 +142,6 @@ struct sock_common {
* @sk_route_caps - route capabilities (e.g. %NETIF_F_TSO)
* @sk_lingertime - %SO_LINGER l_linger setting
* @sk_hashent - hash entry in several tables (e.g. tcp_ehash)
* @sk_pair - socket pair (e.g. AF_UNIX/unix_peer)
* @sk_backlog - always used with the per-socket spinlock held
* @sk_callback_lock - used with the callbacks in the end of this struct
* @sk_error_queue - rarely used
......@@ -219,7 +218,6 @@ struct sock {
int sk_route_caps;
unsigned long sk_lingertime;
int sk_hashent;
struct sock *sk_pair;
/*
* The backlog queue is special, it is always used with
* the per-socket spinlock held and requires low latency
......@@ -557,7 +555,6 @@ struct proto {
kmem_cache_t *slab;
int slab_obj_size;
void *af_specific;
char name[32];
......
......@@ -1338,7 +1338,6 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
remove_wait_queue(sk->sk_sleep, &wait);
newsk = skb->sk;
newsk->sk_pair = NULL;
newsk->sk_socket = newsock;
newsk->sk_sleep = &newsock->wait;
......
......@@ -372,7 +372,6 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
skb_queue_head(&sk->sk_receive_queue, skb);
make->sk_state = TCP_ESTABLISHED;
make->sk_pair = sk;
sk->sk_ack_backlog++;
bh_unlock_sock(sk);
......
......@@ -412,7 +412,9 @@ static int rtnetlink_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
{
struct sk_buff *skb;
int size = NLMSG_GOODSIZE;
int size = NLMSG_SPACE(sizeof(struct ifinfomsg) +
sizeof(struct rtnl_link_ifmap) +
sizeof(struct rtnl_link_stats) + 128);
skb = alloc_skb(size, GFP_KERNEL);
if (!skb)
......
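In the hunk above, rtmsg_ifinfo() stops using the flat NLMSG_GOODSIZE and instead sizes the skb from the actual ifinfo payload (ifinfomsg + link ifmap + link stats) plus 128 bytes of slack, rounded through NLMSG_SPACE(). A small userspace sketch of the same arithmetic, assuming the usual uapi netlink headers are available:

#include <stdio.h>
#include <linux/netlink.h>      /* NLMSG_SPACE() */
#include <linux/rtnetlink.h>    /* struct ifinfomsg */
#include <linux/if_link.h>      /* struct rtnl_link_ifmap, rtnl_link_stats */

int main(void)
{
    /* Same payload estimate as the new rtmsg_ifinfo(): fixed message parts
     * plus 128 bytes of headroom for attributes, then header + alignment. */
    size_t payload = sizeof(struct ifinfomsg) +
                     sizeof(struct rtnl_link_ifmap) +
                     sizeof(struct rtnl_link_stats) + 128;

    printf("payload %zu -> NLMSG_SPACE %zu bytes\n",
           payload, (size_t)NLMSG_SPACE(payload));
    return 0;
}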
......@@ -43,6 +43,8 @@
#include <net/sock.h>
#include <net/ip_fib.h>
#include "fib_lookup.h"
static kmem_cache_t *fn_hash_kmem;
static kmem_cache_t *fn_alias_kmem;
......@@ -52,17 +54,6 @@ struct fib_node {
u32 fn_key;
};
struct fib_alias {
struct list_head fa_list;
struct fib_info *fa_info;
u8 fa_tos;
u8 fa_type;
u8 fa_scope;
u8 fa_state;
};
#define FN_S_ACCESSED 1
struct fn_zone {
struct fn_zone *fz_next; /* Next not empty zone */
struct hlist_head *fz_hash; /* Hash table pointer */
......@@ -265,32 +256,14 @@ fn_hash_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result
head = &fz->fz_hash[fn_hash(k, fz)];
hlist_for_each_entry(f, node, head, fn_hash) {
struct fib_alias *fa;
if (f->fn_key != k)
continue;
list_for_each_entry(fa, &f->fn_alias, fa_list) {
if (fa->fa_tos &&
fa->fa_tos != flp->fl4_tos)
continue;
if (fa->fa_scope < flp->fl4_scope)
continue;
fa->fa_state |= FN_S_ACCESSED;
err = fib_semantic_match(fa->fa_type,
fa->fa_info,
flp, res);
if (err == 0) {
res->type = fa->fa_type;
res->scope = fa->fa_scope;
res->prefixlen = fz->fz_order;
goto out;
}
if (err < 0)
goto out;
}
err = fib_semantic_match(&f->fn_alias,
flp, res,
fz->fz_order);
if (err <= 0)
goto out;
}
}
err = 1;
......@@ -358,7 +331,7 @@ fn_hash_select_default(struct fib_table *tb, const struct flowi *flp, struct fib
if (!next_fi->fib_nh[0].nh_gw ||
next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
continue;
fa->fa_state |= FN_S_ACCESSED;
fa->fa_state |= FA_S_ACCESSED;
if (fi == NULL) {
if (next_fi != res->fi)
......@@ -521,11 +494,11 @@ fn_hash_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
fa->fa_type = type;
fa->fa_scope = r->rtm_scope;
state = fa->fa_state;
fa->fa_state &= ~FN_S_ACCESSED;
fa->fa_state &= ~FA_S_ACCESSED;
write_unlock_bh(&fib_hash_lock);
fib_release_info(fi_drop);
if (state & FN_S_ACCESSED)
if (state & FA_S_ACCESSED)
rt_cache_flush(-1);
return 0;
}
......@@ -669,7 +642,7 @@ fn_hash_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
}
write_unlock_bh(&fib_hash_lock);
if (fa->fa_state & FN_S_ACCESSED)
if (fa->fa_state & FA_S_ACCESSED)
rt_cache_flush(-1);
fn_free_alias(fa);
if (kill_fn) {
......
#ifndef _FIB_LOOKUP_H
#define _FIB_LOOKUP_H
#include <linux/types.h>
#include <linux/list.h>
#include <net/ip_fib.h>
struct fib_alias {
struct list_head fa_list;
struct fib_info *fa_info;
u8 fa_tos;
u8 fa_type;
u8 fa_scope;
u8 fa_state;
};
#define FA_S_ACCESSED 0x01
/* Exported by fib_semantics.c */
extern int fib_semantic_match(struct list_head *head,
const struct flowi *flp,
struct fib_result *res, int prefixlen);
extern void fib_release_info(struct fib_info *);
extern struct fib_info *fib_create_info(const struct rtmsg *r,
struct kern_rta *rta,
const struct nlmsghdr *,
int *err);
extern int fib_nh_match(struct rtmsg *r, struct nlmsghdr *,
struct kern_rta *rta, struct fib_info *fi);
extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
u8 tb_id, u8 type, u8 scope, void *dst,
int dst_len, u8 tos, struct fib_info *fi);
#endif /* _FIB_LOOKUP_H */
......@@ -43,6 +43,8 @@
#include <net/sock.h>
#include <net/ip_fib.h>
#include "fib_lookup.h"
#define FSprintk(a...)
static rwlock_t fib_info_lock = RW_LOCK_UNLOCKED;
......@@ -758,51 +760,73 @@ fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
return NULL;
}
int
fib_semantic_match(int type, struct fib_info *fi, const struct flowi *flp, struct fib_result *res)
int fib_semantic_match(struct list_head *head, const struct flowi *flp,
struct fib_result *res, int prefixlen)
{
int err = fib_props[type].error;
struct fib_alias *fa;
int nh_sel = 0;
if (err == 0) {
if (fi->fib_flags&RTNH_F_DEAD)
return 1;
list_for_each_entry(fa, head, fa_list) {
int err;
res->fi = fi;
if (fa->fa_tos &&
fa->fa_tos != flp->fl4_tos)
continue;
switch (type) {
case RTN_UNICAST:
case RTN_LOCAL:
case RTN_BROADCAST:
case RTN_ANYCAST:
case RTN_MULTICAST:
for_nexthops(fi) {
if (nh->nh_flags&RTNH_F_DEAD)
continue;
if (!flp->oif || flp->oif == nh->nh_oif)
break;
}
if (fa->fa_scope < flp->fl4_scope)
continue;
fa->fa_state |= FA_S_ACCESSED;
err = fib_props[fa->fa_type].error;
if (err == 0) {
struct fib_info *fi = fa->fa_info;
if (fi->fib_flags & RTNH_F_DEAD)
continue;
switch (fa->fa_type) {
case RTN_UNICAST:
case RTN_LOCAL:
case RTN_BROADCAST:
case RTN_ANYCAST:
case RTN_MULTICAST:
for_nexthops(fi) {
if (nh->nh_flags&RTNH_F_DEAD)
continue;
if (!flp->oif || flp->oif == nh->nh_oif)
break;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (nhsel < fi->fib_nhs) {
res->nh_sel = nhsel;
atomic_inc(&fi->fib_clntref);
return 0;
}
if (nhsel < fi->fib_nhs) {
nh_sel = nhsel;
goto out_fill_res;
}
#else
if (nhsel < 1) {
atomic_inc(&fi->fib_clntref);
return 0;
}
if (nhsel < 1) {
goto out_fill_res;
}
#endif
endfor_nexthops(fi);
res->fi = NULL;
return 1;
default:
res->fi = NULL;
printk(KERN_DEBUG "impossible 102\n");
return -EINVAL;
endfor_nexthops(fi);
continue;
default:
printk(KERN_DEBUG "impossible 102\n");
return -EINVAL;
};
}
return err;
}
return err;
return 1;
out_fill_res:
res->prefixlen = prefixlen;
res->nh_sel = nh_sel;
res->type = fa->fa_type;
res->scope = fa->fa_scope;
res->fi = fa->fa_info;
atomic_inc(&res->fi->fib_clntref);
return 0;
}
/* Find appropriate source address to this destination */
......
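The refactor above moves the alias walk that fn_hash_lookup() used to open-code into fib_semantic_match(), which now receives the fib_alias list head plus the prefix length and fills the fib_result itself. A standalone toy of that selection loop, with illustrative types rather than the kernel's:

#include <stdio.h>

/* Toy of the loop fib_semantic_match() now owns: skip aliases whose TOS does
 * not match or whose scope is narrower than requested, and fill the result
 * from the first acceptable entry. Illustrative only. */
struct toy_alias { unsigned char tos, scope, type; };

static int toy_semantic_match(const struct toy_alias *list, int n,
                              unsigned char fl_tos, unsigned char fl_scope,
                              struct toy_alias *res)
{
    int i;

    for (i = 0; i < n; i++) {
        if (list[i].tos && list[i].tos != fl_tos)
            continue;                       /* TOS mismatch */
        if (list[i].scope < fl_scope)
            continue;                       /* scope too narrow */
        *res = list[i];
        return 0;                           /* match found */
    }
    return 1;                               /* nothing matched */
}

int main(void)
{
    struct toy_alias table[2] = { { 0x10, 1, 1 }, { 0, 2, 2 } };
    struct toy_alias res;

    if (toy_semantic_match(table, 2, 0, 2, &res) == 0)
        printf("matched alias: type %u, scope %u\n", res.type, res.scope);
    return 0;
}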
......@@ -691,6 +691,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
skb->ip_summed = CHECKSUM_HW;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
TCP_SKB_CB(skb)->tso_factor = 0;
if (!copied)
TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
......@@ -937,6 +938,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
TCP_SKB_CB(skb)->tso_factor = 0;
from += copy;
copied += copy;
......
......@@ -553,7 +553,7 @@ unsigned char * __pskb_trim_head(struct sk_buff *skb, int len)
return skb->tail;
}
static int __tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
static int __tcp_trim_head(struct tcp_opt *tp, struct sk_buff *skb, u32 len)
{
if (skb_cloned(skb) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
......@@ -567,12 +567,18 @@ static int __tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
}
skb->ip_summed = CHECKSUM_HW;
/* Any change of skb->len requires recalculation of tso
* factor and mss.
*/
tcp_set_skb_tso_factor(skb, tp->mss_cache_std);
return 0;
}
static inline int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
static inline int tcp_trim_head(struct tcp_opt *tp, struct sk_buff *skb, u32 len)
{
int err = __tcp_trim_head(sk, skb, len);
int err = __tcp_trim_head(tp, skb, len);
if (!err)
TCP_SKB_CB(skb)->seq += len;
......@@ -897,6 +903,9 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
((skb_size + next_skb_size) > mss_now))
return;
BUG_ON(TCP_SKB_CB(skb)->tso_factor != 1 ||
TCP_SKB_CB(next_skb)->tso_factor != 1);
/* Ok. We will be able to collapse the packet. */
__skb_unlink(next_skb, next_skb->list);
......@@ -1018,7 +1027,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (skb->len > (data_end_seq - data_seq)) {
u32 to_trim = skb->len - (data_end_seq - data_seq);
if (__tcp_trim_head(sk, skb, to_trim))
if (__tcp_trim_head(tp, skb, to_trim))
return -ENOMEM;
}
......@@ -1032,7 +1041,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
tp->mss_cache = tp->mss_cache_std;
}
if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
if (tcp_trim_head(tp, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
return -ENOMEM;
}
......@@ -1080,6 +1089,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
if (!pskb_trim(skb, 0)) {
TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
TCP_SKB_CB(skb)->tso_factor = 1;
skb->ip_summed = CHECKSUM_NONE;
skb->csum = 0;
}
......
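In the hunks above, any change to an skb's length (data appended in tcp_sendmsg()/do_tcp_sendpages(), or bytes trimmed in __tcp_trim_head()) invalidates its TSO factor, so the factor is either zeroed to force recomputation or recalculated against mss_cache_std. The sketch below shows one plausible way a per-skb segment count could be derived; the real tcp_set_skb_tso_factor() is not part of this diff, so treat the rounding rule as an assumption.

#include <stdio.h>

/* Hypothetical recalculation: one factor unit per MSS-sized segment the skb
 * would be split into on the wire. The actual kernel helper may differ; this
 * only illustrates why the factor must track skb->len. */
static unsigned int toy_tso_factor(unsigned int skb_len, unsigned int mss)
{
    if (skb_len == 0)
        return 1;                         /* e.g. a bare control segment */
    return (skb_len + mss - 1) / mss;     /* round up to whole segments */
}

int main(void)
{
    printf("2920 bytes at mss 1460 -> factor %u\n", toy_tso_factor(2920, 1460));
    printf("2921 bytes at mss 1460 -> factor %u\n", toy_tso_factor(2921, 1460));
    return 0;
}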
......@@ -107,9 +107,9 @@ static void inet6_sock_destruct(struct sock *sk)
static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
{
const struct ipv6_sk_offset *offset = sk->sk_prot->af_specific;
const int offset = sk->sk_prot->slab_obj_size - sizeof(struct ipv6_pinfo);
return (struct ipv6_pinfo *)(((u8 *)sk) + offset->offset);
return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
static int inet6_create(struct socket *sock, int protocol)
......
......@@ -973,10 +973,6 @@ static int rawv6_init_sk(struct sock *sk)
return(0);
}
struct ipv6_sk_offset raw_sock_offset = {
.offset = offsetof(struct raw6_sock, inet6),
};
struct proto rawv6_prot = {
.name = "RAW",
.close = rawv6_close,
......@@ -994,7 +990,6 @@ struct proto rawv6_prot = {
.hash = raw_v6_hash,
.unhash = raw_v6_unhash,
.slab_obj_size = sizeof(struct raw6_sock),
.af_specific = &raw_sock_offset,
};
#ifdef CONFIG_PROC_FS
......
......@@ -2122,10 +2122,6 @@ void tcp6_proc_exit(void)
}
#endif
struct ipv6_sk_offset tcp_sock_offset = {
.offset = offsetof(struct tcp6_sock, inet6),
};
struct proto tcpv6_prot = {
.name = "TCPv6",
.close = tcp_close,
......@@ -2153,7 +2149,6 @@ struct proto tcpv6_prot = {
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
.slab_obj_size = sizeof(struct tcp6_sock),
.af_specific = &tcp_sock_offset,
};
static struct inet6_protocol tcpv6_protocol = {
......
......@@ -1031,10 +1031,6 @@ void udp6_proc_exit(void) {
/* ------------------------------------------------------------------------ */
struct ipv6_sk_offset udp_sock_offset = {
.offset = offsetof(struct udp6_sock, inet6),
};
struct proto udpv6_prot = {
.name = "UDP",
.close = udpv6_close,
......@@ -1051,7 +1047,6 @@ struct proto udpv6_prot = {
.unhash = udp_v6_unhash,
.get_port = udp_v6_get_port,
.slab_obj_size = sizeof(struct udp6_sock),
.af_specific = &udp_sock_offset,
};
extern struct proto_ops inet6_dgram_ops;
......
......@@ -638,7 +638,6 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags)
newsk = skb->sk;
/* attach connection to a new socket. */
llc_ui_sk_init(newsock, newsk);
newsk->sk_pair = NULL;
newsk->sk_zapped = 0;
newsk->sk_state = TCP_ESTABLISHED;
newsock->state = SS_CONNECTED;
......
......@@ -801,7 +801,6 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
remove_wait_queue(sk->sk_sleep, &wait);
newsk = skb->sk;
newsk->sk_pair = NULL;
newsk->sk_socket = newsock;
newsk->sk_sleep = &newsock->wait;
......@@ -994,7 +993,6 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
nr_make->vl = 0;
nr_make->state = NR_STATE_3;
sk->sk_ack_backlog++;
make->sk_pair = sk;
nr_insert_socket(make);
......
......@@ -882,7 +882,6 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
remove_wait_queue(sk->sk_sleep, &wait);
newsk = skb->sk;
newsk->sk_pair = NULL;
newsk->sk_socket = newsock;
newsk->sk_sleep = &newsock->wait;
......@@ -996,7 +995,6 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
make_rose->vr = 0;
make_rose->vl = 0;
sk->sk_ack_backlog++;
make->sk_pair = sk;
rose_insert_socket(make);
......
......@@ -4626,10 +4626,6 @@ struct proto sctp_prot = {
};
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct ipv6_sk_offset sctp_sock_offset = {
.offset = offsetof(struct sctp6_sock, inet6),
};
struct proto sctpv6_prot = {
.name = "SCTPv6",
.close = sctp_close,
......@@ -4650,6 +4646,5 @@ struct proto sctpv6_prot = {
.unhash = sctp_unhash,
.get_port = sctp_get_port,
.slab_obj_size = sizeof(struct sctp6_sock),
.af_specific = &sctp_sock_offset,
};
#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
......@@ -144,7 +144,7 @@ static inline unsigned unix_hash_fold(unsigned hash)
return hash&(UNIX_HASH_SIZE-1);
}
#define unix_peer(sk) ((sk)->sk_pair)
#define unix_peer(sk) (unix_sk(sk)->peer)
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
......
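With sk_pair removed from the generic struct sock (see the include/net/sock.h hunk earlier), the hunk above moves the AF_UNIX peer pointer into the protocol-private struct unix_sock, reached through the usual cast accessor. A standalone toy of that pattern, with stand-in names:

#include <stdio.h>

/* Stand-ins for struct sock / struct unix_sock: the generic sock is the
 * first member, so the private struct is reachable by a plain cast. */
struct fake_sock      { int family; };
struct fake_unix_sock { struct fake_sock sk; struct fake_sock *peer; };

#define fake_unix_sk(s)   ((struct fake_unix_sock *)(s))
#define fake_unix_peer(s) (fake_unix_sk(s)->peer)

int main(void)
{
    struct fake_unix_sock a = { { 1 }, NULL };
    struct fake_unix_sock b = { { 1 }, NULL };

    fake_unix_peer(&a.sk) = &b.sk;        /* pair a with b */
    printf("a's peer is b: %s\n",
           fake_unix_peer(&a.sk) == &b.sk ? "yes" : "no");
    return 0;
}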
......@@ -2423,7 +2423,6 @@ static int wanpipe_accept(struct socket *sock, struct socket *newsock, int flags
write_unlock(&wanpipe_sklist_lock);
clear_bit(1,&wanpipe_tx_critical);
newsk->sk_pair = NULL;
newsk->sk_socket = newsock;
newsk->sk_sleep = &newsock->wait;
......
......@@ -769,7 +769,6 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
if (!skb->sk)
goto out2;
newsk = skb->sk;
newsk->sk_pair = NULL;
newsk->sk_socket = newsock;
newsk->sk_sleep = &newsock->wait;
......@@ -887,7 +886,6 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
makex25->state = X25_STATE_3;
sk->sk_ack_backlog++;
make->sk_pair = sk;
x25_insert_socket(make);
......