Commit e32c91c0 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.6

into home.osdl.org:/home/torvalds/v2.5/linux
parents b9575611 d5991ced
......@@ -1567,7 +1567,7 @@ E: mk@linux-ipv6.org
E: mk@isl.rdc.toshiba.co.jp
E: mk@karaba.org
W: http://www.karaba.org/~mk/
P: 1024D/2EC7E30D 9A35 D378 F084 9EA4 EFBA 925B 1C93 B376 F0EF BE59
P: 1024D/2EC7E30D 4DC3 949B 5A6C F0D6 375F 4472 8888 A8E1 2EC7 E30D
D: IPsec, IPv6
D: USAGI/WIDE Project, TOSHIBA CORPORATION
S: 2-47-8, Takinogawa,
......
......@@ -350,7 +350,7 @@ static int __init ethif_probe(int unit)
* Backwards compatibility - historically an I/O base of 1 was
* used to indicate not to probe for this ethN interface
*/
if (dev->base_addr == 1) {
if (__dev_get_by_name(dev->name) || dev->base_addr == 1) {
free_netdev(dev);
return -ENXIO;
}
......
......@@ -159,7 +159,10 @@ struct inet_sock {
struct inet_opt inet;
};
#define inet_sk(__sk) (&((struct inet_sock *)__sk)->inet)
static inline struct inet_opt * inet_sk(const struct sock *__sk)
{
return &((struct inet_sock *)__sk)->inet;
}
#endif
......
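The hunk above replaces the inet_sk() accessor macro with a static inline function; the same conversion is applied to inet6_sk(), raw6_sk() and udp_sk() in the hunks that follow. The gain is argument type checking and single evaluation at no runtime cost. A minimal userspace sketch of that macro-versus-inline trade-off, with made-up struct layouts standing in for the real socket structures:

#include <stdio.h>

struct sock { int refcnt; };
struct inet_opt { int ttl; };
struct inet_sock { struct sock sk; struct inet_opt inet; };

/* macro form: any pointer type is silently cast and accepted */
#define inet_sk_macro(__sk) (&((struct inet_sock *)(__sk))->inet)

/* inline form: the compiler checks that a struct sock * is passed */
static inline struct inet_opt *inet_sk_inline(const struct sock *sk)
{
        return &((struct inet_sock *)sk)->inet;
}

int main(void)
{
        struct inet_sock isk = { { 1 }, { 64 } };
        struct sock *sk = &isk.sk;

        printf("ttl via macro:  %d\n", inet_sk_macro(sk)->ttl);
        printf("ttl via inline: %d\n", inet_sk_inline(sk)->ttl);
        /* inet_sk_inline(&isk.inet) would draw a diagnostic here,
         * while the macro form would compile silently and misbehave. */
        return 0;
}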
......@@ -270,8 +270,15 @@ struct tcp6_sock {
struct ipv6_pinfo inet6;
};
#define inet6_sk(__sk) ((struct raw6_sock *)__sk)->pinet6
#define raw6_sk(__sk) (&((struct raw6_sock *)__sk)->raw6)
static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
{
return ((struct raw6_sock *)__sk)->pinet6;
}
static inline struct raw6_opt * raw6_sk(const struct sock *__sk)
{
return &((struct raw6_sock *)__sk)->raw6;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only)
......
......@@ -634,7 +634,7 @@ static inline void dev_kfree_skb_irq(struct sk_buff *skb)
*/
static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
if (in_irq())
if (in_irq() || irqs_disabled())
dev_kfree_skb_irq(skb);
else
dev_kfree_skb(skb);
......
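With this change dev_kfree_skb_any() takes the IRQ-safe path not only in hard-IRQ context but also whenever interrupts are disabled, letting dev_kfree_skb_irq() defer the actual free to a safe context instead of doing it inline. A loose userspace sketch of that "free now or park it for later" pattern; the context flag and deferred list are stand-ins, not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

struct buf { struct buf *next; int id; };

static struct buf *deferred;     /* buffers queued for later release */
static int in_restricted_ctx;    /* stand-in for in_irq() || irqs_disabled() */

static void free_deferred(void)  /* runs later, from a safe context */
{
        while (deferred) {
                struct buf *b = deferred;
                deferred = b->next;
                printf("late free of buf %d\n", b->id);
                free(b);
        }
}

static void free_buf_any(struct buf *b)
{
        if (in_restricted_ctx) {        /* cannot free safely right now */
                b->next = deferred;     /* park it; a later pass frees it */
                deferred = b;
        } else {
                printf("immediate free of buf %d\n", b->id);
                free(b);
        }
}

int main(void)
{
        struct buf *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

        a->id = 1; b->id = 2;
        free_buf_any(a);                /* safe context: freed immediately */
        in_restricted_ctx = 1;
        free_buf_any(b);                /* restricted context: only queued */
        in_restricted_ctx = 0;
        free_deferred();                /* later, the queue is drained */
        return 0;
}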
......@@ -51,6 +51,7 @@
enum nf_ip_hook_priorities {
NF_IP_PRI_FIRST = INT_MIN,
NF_IP_PRI_SELINUX_FIRST = -225,
NF_IP_PRI_CONNTRACK = -200,
NF_IP_PRI_BRIDGE_SABOTAGE_FORWARD = -175,
NF_IP_PRI_MANGLE = -150,
......@@ -58,6 +59,7 @@ enum nf_ip_hook_priorities {
NF_IP_PRI_BRIDGE_SABOTAGE_LOCAL_OUT = -50,
NF_IP_PRI_FILTER = 0,
NF_IP_PRI_NAT_SRC = 100,
NF_IP_PRI_SELINUX_LAST = 225,
NF_IP_PRI_LAST = INT_MAX,
};
......
......@@ -56,11 +56,13 @@
enum nf_ip6_hook_priorities {
NF_IP6_PRI_FIRST = INT_MIN,
NF_IP6_PRI_SELINUX_FIRST = -225,
NF_IP6_PRI_CONNTRACK = -200,
NF_IP6_PRI_MANGLE = -150,
NF_IP6_PRI_NAT_DST = -100,
NF_IP6_PRI_FILTER = 0,
NF_IP6_PRI_NAT_SRC = 100,
NF_IP6_PRI_SELINUX_LAST = 225,
NF_IP6_PRI_LAST = INT_MAX,
};
......
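Both priority hunks reserve SELinux anchor slots (-225 and 225) around the existing conntrack/mangle/NAT/filter entries, so hooks registered at those priorities run before conntrack and after source NAT respectively. Ordering hooks by a signed priority is just a sorted insert into the per-hook list; a small userspace sketch, with an illustrative list node type and the priority values taken from the enums above:

#include <stdio.h>
#include <stdlib.h>

/* a registered hook: lower priority value runs earlier */
struct hook {
        struct hook *next;
        int priority;
        const char *name;
};

static struct hook *hooks;

static void register_hook(const char *name, int priority)
{
        struct hook *h = malloc(sizeof(*h)), **pp = &hooks;

        h->name = name;
        h->priority = priority;
        while (*pp && (*pp)->priority <= priority)  /* keep ascending order */
                pp = &(*pp)->next;
        h->next = *pp;
        *pp = h;
}

int main(void)
{
        register_hook("filter",          0);
        register_hook("conntrack",    -200);
        register_hook("selinux_first", -225);
        register_hook("mangle",       -150);
        register_hook("nat_src",       100);
        register_hook("selinux_last",  225);

        for (struct hook *h = hooks; h; h = h->next)
                printf("%6d  %s\n", h->priority, h->name);
        return 0;
}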
......@@ -1132,7 +1132,7 @@ static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
* is returned and the old skb data released.
*/
extern int __skb_linearize(struct sk_buff *skb, int gfp);
static inline int __deprecated skb_linearize(struct sk_buff *skb, int gfp)
static inline int skb_linearize(struct sk_buff *skb, int gfp)
{
return __skb_linearize(skb, gfp);
}
......
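This hunk drops the __deprecated marking from skb_linearize(), so callers no longer get a build-time warning. In gcc builds the kernel's __deprecated macro expands to the compiler's deprecated attribute; a minimal standalone illustration with a made-up helper function:

#include <stdio.h>

/* what the kernel's __deprecated boils down to under gcc */
#define __deprecated __attribute__((deprecated))

static int __deprecated old_helper(int x)
{
        return x * 2;
}

int main(void)
{
        /* gcc/clang warn that old_helper is deprecated; it still builds and runs */
        printf("%d\n", old_helper(21));
        return 0;
}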
......@@ -60,7 +60,10 @@ struct udp_sock {
struct udp_opt udp;
};
#define udp_sk(__sk) (&((struct udp_sock *)__sk)->udp)
static inline struct udp_opt * udp_sk(const struct sock *__sk)
{
return &((struct udp_sock *)__sk)->udp;
}
#endif
......
......@@ -329,13 +329,6 @@ extern int ip6_nd_hdr(struct sock *sk,
struct in6_addr *daddr,
int proto, int len);
extern int ip6_build_xmit(struct sock *sk,
inet_getfrag_t getfrag,
const void *data,
struct flowi *fl,
unsigned length,
struct ipv6_txoptions *opt,
int hlimit, int flags);
extern int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
extern int ip6_append_data(struct sock *sk,
......
......@@ -52,6 +52,11 @@
/* Same for payload size. See qos.c for the smallest max data size */
#define IRCOMM_TTY_DATA_UNINITIALISED (64 - IRCOMM_TTY_HDR_UNINITIALISED)
/* Those are really defined in include/linux/serial.h - Jean II */
#define ASYNC_B_INITIALIZED 31 /* Serial port was initialized */
#define ASYNC_B_NORMAL_ACTIVE 29 /* Normal device is active */
#define ASYNC_B_CLOSING 27 /* Serial port is closing */
/*
* IrCOMM TTY driver state
*/
......@@ -75,7 +80,7 @@ struct ircomm_tty_cb {
LOCAL_FLOW flow; /* IrTTP flow status */
int line;
__u32 flags;
volatile unsigned long flags;
__u8 dlsap_sel;
__u8 slsap_sel;
......
......@@ -215,15 +215,25 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg,
int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
{
struct compat_timeval ctv;
struct compat_cmsghdr *cm = (struct compat_cmsghdr *) kmsg->msg_control;
struct compat_cmsghdr cmhdr;
int cmlen = CMSG_COMPAT_LEN(len);
int cmlen;
if(cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
kmsg->msg_flags |= MSG_CTRUNC;
return 0; /* XXX: return error? check spec. */
}
if (level == SOL_SOCKET && type == SO_TIMESTAMP) {
struct timeval *tv = (struct timeval *)data;
ctv.tv_sec = tv->tv_sec;
ctv.tv_usec = tv->tv_usec;
data = &ctv;
len = sizeof(struct compat_timeval);
}
cmlen = CMSG_COMPAT_LEN(len);
if(kmsg->msg_controllen < cmlen) {
kmsg->msg_flags |= MSG_CTRUNC;
cmlen = kmsg->msg_controllen;
......
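The put_cmsg_compat() fix computes CMSG_COMPAT_LEN() only after a SOL_SOCKET/SO_TIMESTAMP payload has been rewritten as a struct compat_timeval, so the reported control-message length matches the 32-bit layout actually copied to userspace. The narrowing itself is a member-by-member copy between the native and compat layouts; a userspace sketch with a stand-in 32-bit type:

#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

/* stand-in for the kernel's compat_timeval: two 32-bit fields */
struct compat_timeval32 {
        int32_t tv_sec;
        int32_t tv_usec;
};

int main(void)
{
        struct timeval tv;
        struct compat_timeval32 ctv;

        gettimeofday(&tv, NULL);

        /* member-wise copy narrows each field to the 32-bit layout */
        ctv.tv_sec  = (int32_t)tv.tv_sec;
        ctv.tv_usec = (int32_t)tv.tv_usec;

        /* on a 64-bit host the sizes differ, which is why the cmsg
         * length must be taken from the converted payload */
        printf("native size %zu, compat size %zu\n", sizeof(tv), sizeof(ctv));
        printf("sec %d usec %d\n", ctv.tv_sec, ctv.tv_usec);
        return 0;
}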
......@@ -2376,17 +2376,17 @@ static int __init decnet_init(void)
dn_register_sysctl();
/*
* Prevent DECnet module unloading until its fixed properly.
* Requires an audit of the code to check for memory leaks and
* initialisation problems etc.
*/
try_module_get(THIS_MODULE);
return 0;
}
module_init(decnet_init);
/*
* Prevent DECnet module unloading until its fixed properly.
* Requires an audit of the code to check for memory leaks and
* initialisation problems etc.
*/
#if 0
static void __exit decnet_exit(void)
{
sock_unregister(AF_DECnet);
......@@ -2405,6 +2405,5 @@ static void __exit decnet_exit(void)
kmem_cache_destroy(dn_sk_cachep);
}
module_init(decnet_init);
module_exit(decnet_exit);
#endif
......@@ -1809,6 +1809,54 @@ static void addrconf_sit_config(struct net_device *dev)
sit_route_add(dev);
}
static inline int
ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
{
struct in6_addr lladdr;
if (!ipv6_get_lladdr(link_dev, &lladdr)) {
addrconf_add_linklocal(idev, &lladdr);
return 0;
}
return -1;
}
static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
{
struct net_device *link_dev;
/* first try to inherit the link-local address from the link device */
if (idev->dev->iflink &&
(link_dev = __dev_get_by_index(idev->dev->iflink))) {
if (!ipv6_inherit_linklocal(idev, link_dev))
return;
}
/* then try to inherit it from any device */
for (link_dev = dev_base; link_dev; link_dev = link_dev->next) {
if (!ipv6_inherit_linklocal(idev, link_dev))
return;
}
printk(KERN_DEBUG "init ip6-ip6: add_linklocal failed\n");
}
/*
* Autoconfigure tunnel with a link-local address so routing protocols,
* DHCPv6, MLD etc. can be run over the virtual link
*/
static void addrconf_ip6_tnl_config(struct net_device *dev)
{
struct inet6_dev *idev;
ASSERT_RTNL();
if ((idev = addrconf_add_dev(dev)) == NULL) {
printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
return;
}
ip6_tnl_add_linklocal(idev);
addrconf_add_mroute(dev);
}
int addrconf_notify(struct notifier_block *this, unsigned long event,
void * data)
......@@ -1822,7 +1870,9 @@ int addrconf_notify(struct notifier_block *this, unsigned long event,
case ARPHRD_SIT:
addrconf_sit_config(dev);
break;
case ARPHRD_TUNNEL6:
addrconf_ip6_tnl_config(dev);
break;
case ARPHRD_LOOPBACK:
init_loopback(dev);
break;
......@@ -2121,6 +2171,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
*/
if (ifp->idev->cnf.forwarding == 0 &&
ifp->idev->cnf.rtr_solicits > 0 &&
(dev->flags&IFF_LOOPBACK) == 0 &&
(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
struct in6_addr all_routers;
......
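The new addrconf_ip6_tnl_config() path gives an ip6-ip6 tunnel a link-local address by first trying the device the tunnel is layered on (iflink) and only then scanning every other device. That "preferred candidate first, then any candidate" lookup is a simple two-stage search; a userspace sketch with illustrative device records, not the real inet6_dev structures:

#include <stddef.h>
#include <stdio.h>

struct dev { const char *name; const char *lladdr; };  /* illustrative */

/* try to inherit an address from one candidate; 0 on success, -1 otherwise */
static int inherit_from(const struct dev *d, const char **out)
{
        if (d && d->lladdr) {
                *out = d->lladdr;
                return 0;
        }
        return -1;
}

int main(void)
{
        struct dev link = { "eth0", NULL };              /* preferred link, no address */
        struct dev all[] = { { "lo", NULL }, { "eth1", "fe80::1" } };
        const char *addr = NULL;

        /* first the underlying link device, then any other device */
        if (inherit_from(&link, &addr) != 0) {
                for (size_t i = 0; i < sizeof(all) / sizeof(all[0]); i++)
                        if (inherit_from(&all[i], &addr) == 0)
                                break;
        }

        printf("link-local source: %s\n", addr ? addr : "none");
        return 0;
}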
......@@ -304,422 +304,6 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
return 0;
}
static struct ipv6hdr * ip6_bld_1(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
int hlimit, unsigned pktlength)
{
struct ipv6hdr *hdr;
skb->nh.raw = skb_put(skb, sizeof(struct ipv6hdr));
hdr = skb->nh.ipv6h;
*(u32*)hdr = fl->fl6_flowlabel | htonl(0x60000000);
hdr->payload_len = htons(pktlength - sizeof(struct ipv6hdr));
hdr->hop_limit = hlimit;
hdr->nexthdr = fl->proto;
ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
ipv6_addr_copy(&hdr->daddr, &fl->fl6_dst);
return hdr;
}
static __inline__ u8 * ipv6_build_fraghdr(struct sk_buff *skb, u8* prev_hdr, unsigned offset)
{
struct frag_hdr *fhdr;
fhdr = (struct frag_hdr *) skb_put(skb, sizeof(struct frag_hdr));
fhdr->nexthdr = *prev_hdr;
*prev_hdr = NEXTHDR_FRAGMENT;
prev_hdr = &fhdr->nexthdr;
fhdr->reserved = 0;
fhdr->frag_off = htons(offset);
ipv6_select_ident(skb, fhdr);
return &fhdr->nexthdr;
}
static int ip6_frag_xmit(struct sock *sk, inet_getfrag_t getfrag,
const void *data, struct dst_entry *dst,
struct flowi *fl, struct ipv6_txoptions *opt,
struct in6_addr *final_dst,
int hlimit, int flags, unsigned length, int mtu)
{
struct ipv6hdr *hdr;
struct sk_buff *last_skb;
u8 *prev_hdr;
int unfrag_len;
int frag_len;
int last_len;
int nfrags;
int fhdr_dist;
int frag_off;
int data_off;
int err;
/*
* Fragmentation
*
* Extension header order:
* Hop-by-hop -> Dest0 -> Routing -> Fragment -> Auth -> Dest1 -> rest (...)
*
* We must build the non-fragmented part that
* will be in every packet... this also means
* that other extension headers (Dest, Auth, etc)
* must be considered in the data to be fragmented
*/
unfrag_len = sizeof(struct ipv6hdr) + sizeof(struct frag_hdr);
last_len = length;
if (opt) {
unfrag_len += opt->opt_nflen;
last_len += opt->opt_flen;
}
/*
* Length of fragmented part on every packet but
* the last must be an:
* "integer multiple of 8 octets".
*/
frag_len = (mtu - unfrag_len) & ~0x7;
/* Unfragmentable part exceeds mtu. */
if (frag_len <= 0) {
ipv6_local_error(sk, EMSGSIZE, fl, mtu);
return -EMSGSIZE;
}
nfrags = last_len / frag_len;
/*
* We must send from end to start because of
* UDP/ICMP checksums. We do a funny trick:
* fill the last skb first with the fixed
* header (and its data) and then use it
* to create the following segments and send it
* in the end. If the peer is checking the M_flag
* to trigger the reassembly code then this
* might be a good idea.
*/
frag_off = nfrags * frag_len;
last_len -= frag_off;
if (last_len == 0) {
last_len = frag_len;
frag_off -= frag_len;
nfrags--;
}
data_off = frag_off;
/* And it is implementation problem: for now we assume, that
all the exthdrs will fit to the first fragment.
*/
if (opt) {
if (frag_len < opt->opt_flen) {
ipv6_local_error(sk, EMSGSIZE, fl, mtu);
return -EMSGSIZE;
}
data_off = frag_off - opt->opt_flen;
}
if (flags&MSG_PROBE)
return 0;
last_skb = sock_alloc_send_skb(sk, unfrag_len + frag_len +
dst->dev->hard_header_len + 15,
flags & MSG_DONTWAIT, &err);
if (last_skb == NULL)
return err;
last_skb->dst = dst_clone(dst);
skb_reserve(last_skb, (dst->dev->hard_header_len + 15) & ~15);
hdr = ip6_bld_1(sk, last_skb, fl, hlimit, frag_len+unfrag_len);
prev_hdr = &hdr->nexthdr;
if (opt && opt->opt_nflen)
prev_hdr = ipv6_build_nfrag_opts(last_skb, prev_hdr, opt, final_dst, 0);
prev_hdr = ipv6_build_fraghdr(last_skb, prev_hdr, frag_off);
fhdr_dist = prev_hdr - last_skb->data;
err = getfrag(data, &hdr->saddr, last_skb->tail, data_off, last_len);
if (!err) {
while (nfrags--) {
struct sk_buff *skb;
struct frag_hdr *fhdr2;
skb = skb_copy(last_skb, sk->sk_allocation);
if (skb == NULL) {
IP6_INC_STATS(Ip6FragFails);
kfree_skb(last_skb);
return -ENOMEM;
}
frag_off -= frag_len;
data_off -= frag_len;
fhdr2 = (struct frag_hdr *) (skb->data + fhdr_dist);
/* more flag on */
fhdr2->frag_off = htons(frag_off | 1);
/* Write fragmentable exthdrs to the first chunk */
if (nfrags == 0 && opt && opt->opt_flen) {
ipv6_build_frag_opts(skb, &fhdr2->nexthdr, opt);
frag_len -= opt->opt_flen;
data_off = 0;
}
err = getfrag(data, &hdr->saddr,skb_put(skb, frag_len),
data_off, frag_len);
if (err) {
kfree_skb(skb);
break;
}
IP6_INC_STATS(Ip6FragCreates);
IP6_INC_STATS(Ip6OutRequests);
err = NF_HOOK(PF_INET6,NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
if (err) {
kfree_skb(last_skb);
return err;
}
}
}
if (err) {
IP6_INC_STATS(Ip6FragFails);
kfree_skb(last_skb);
return -EFAULT;
}
hdr->payload_len = htons(unfrag_len + last_len - sizeof(struct ipv6hdr));
/*
* update last_skb to reflect the getfrag we did
* on start.
*/
skb_put(last_skb, last_len);
IP6_INC_STATS(Ip6FragCreates);
IP6_INC_STATS(Ip6FragOKs);
IP6_INC_STATS(Ip6OutRequests);
return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, last_skb, NULL,dst->dev, ip6_maybe_reroute);
}
int ip6_build_xmit(struct sock *sk, inet_getfrag_t getfrag, const void *data,
struct flowi *fl, unsigned length,
struct ipv6_txoptions *opt, int hlimit, int flags)
{
struct inet_opt *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr final_dst_buf, *final_dst = NULL;
struct dst_entry *dst;
int err = 0;
unsigned int pktlength, jumbolen, mtu;
if (opt && opt->srcrt) {
struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
ipv6_addr_copy(&final_dst_buf, &fl->fl6_dst);
final_dst = &final_dst_buf;
ipv6_addr_copy(&fl->fl6_dst, rt0->addr);
}
if (!fl->oif && ipv6_addr_is_multicast(&fl->fl6_dst))
fl->oif = np->mcast_oif;
dst = __sk_dst_check(sk, np->dst_cookie);
if (dst) {
struct rt6_info *rt = (struct rt6_info*)dst;
/* Yes, checking route validity in not connected
case is not very simple. Take into account,
that we do not support routing by source, TOS,
and MSG_DONTROUTE --ANK (980726)
1. If route was host route, check that
cached destination is current.
If it is network route, we still may
check its validity using saved pointer
to the last used address: daddr_cache.
We do not want to save whole address now,
(because main consumer of this service
is tcp, which has not this problem),
so that the last trick works only on connected
sockets.
2. oif also should be the same.
*/
if (((rt->rt6i_dst.plen != 128 ||
ipv6_addr_cmp(&fl->fl6_dst, &rt->rt6i_dst.addr))
&& (np->daddr_cache == NULL ||
ipv6_addr_cmp(&fl->fl6_dst, np->daddr_cache)))
|| (fl->oif && fl->oif != dst->dev->ifindex)) {
dst = NULL;
} else
dst_hold(dst);
}
if (dst == NULL)
dst = ip6_route_output(sk, fl);
if (dst->error) {
IP6_INC_STATS(Ip6OutNoRoutes);
dst_release(dst);
return -ENETUNREACH;
}
if (ipv6_addr_any(&fl->fl6_src)) {
err = ipv6_get_saddr(dst, &fl->fl6_dst, &fl->fl6_src);
if (err) {
#if IP6_DEBUG >= 2
printk(KERN_DEBUG "ip6_build_xmit: "
"no available source address\n");
#endif
goto out;
}
}
pktlength = length;
if (dst) {
if ((err = xfrm_lookup(&dst, fl, sk, 0)) < 0) {
dst_release(dst);
return -ENETUNREACH;
}
}
if (hlimit < 0) {
if (ipv6_addr_is_multicast(&fl->fl6_dst))
hlimit = np->mcast_hops;
else
hlimit = np->hop_limit;
if (hlimit < 0)
hlimit = dst_metric(dst, RTAX_HOPLIMIT);
}
jumbolen = 0;
if (!inet->hdrincl) {
pktlength += sizeof(struct ipv6hdr);
if (opt)
pktlength += opt->opt_flen + opt->opt_nflen;
if (pktlength > sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
/* Jumbo datagram.
It is assumed, that in the case of hdrincl
jumbo option is supplied by user.
*/
pktlength += 8;
jumbolen = pktlength - sizeof(struct ipv6hdr);
}
}
mtu = dst_pmtu(dst);
if (np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
else if (np->pmtudisc == IPV6_PMTUDISC_DONT)
mtu = IPV6_MIN_MTU;
}
/* Critical arithmetic overflow check.
FIXME: may gcc optimize it out? --ANK (980726)
*/
if (pktlength < length) {
ipv6_local_error(sk, EMSGSIZE, fl, mtu);
err = -EMSGSIZE;
goto out;
}
if (flags&MSG_CONFIRM)
dst_confirm(dst);
if (pktlength <= mtu) {
struct sk_buff *skb;
struct ipv6hdr *hdr;
struct net_device *dev = dst->dev;
err = 0;
if (flags&MSG_PROBE)
goto out;
/* alloc skb with mtu as we do in the IPv4 stack for IPsec */
skb = sock_alloc_send_skb(sk, mtu + LL_RESERVED_SPACE(dev),
flags & MSG_DONTWAIT, &err);
if (skb == NULL) {
IP6_INC_STATS(Ip6OutDiscards);
goto out;
}
skb->dst = dst_clone(dst);
skb_reserve(skb, (dev->hard_header_len + 15) & ~15);
hdr = (struct ipv6hdr *) skb->tail;
skb->nh.ipv6h = hdr;
if (!inet->hdrincl) {
ip6_bld_1(sk, skb, fl, hlimit,
jumbolen ? sizeof(struct ipv6hdr) : pktlength);
if (opt || jumbolen) {
u8 *prev_hdr = &hdr->nexthdr;
prev_hdr = ipv6_build_nfrag_opts(skb, prev_hdr, opt, final_dst, jumbolen);
if (opt && opt->opt_flen)
ipv6_build_frag_opts(skb, prev_hdr, opt);
}
}
skb_put(skb, length);
err = getfrag(data, &hdr->saddr,
((char *) hdr) + (pktlength - length),
0, length);
if (!opt || !opt->dst1opt)
skb->h.raw = ((char *) hdr) + (pktlength - length);
if (!err) {
IP6_INC_STATS(Ip6OutRequests);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
} else {
err = -EFAULT;
kfree_skb(skb);
}
} else {
if (inet->hdrincl || jumbolen ||
np->pmtudisc == IPV6_PMTUDISC_DO) {
ipv6_local_error(sk, EMSGSIZE, fl, mtu);
err = -EMSGSIZE;
goto out;
}
err = ip6_frag_xmit(sk, getfrag, data, dst, fl, opt, final_dst, hlimit,
flags, length, mtu);
}
/*
* cleanup
*/
out:
ip6_dst_store(sk, dst,
!ipv6_addr_cmp(&fl->fl6_dst, &np->daddr) ?
&np->daddr : NULL);
if (err > 0)
err = np->recverr ? net_xmit_errno(err) : 0;
return err;
}
int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
struct ip6_ra_chain *ra;
......
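The removed ip6_frag_xmit() sized fragments so that every fragment except the last carries a multiple of 8 octets, as IPv6 fragmentation requires: frag_len = (mtu - unfrag_len) & ~0x7, with the remainder going into the final fragment. The arithmetic in isolation, with made-up values and the extension-header lengths left out:

#include <stdio.h>

int main(void)
{
        unsigned mtu = 1280;                 /* illustrative path MTU */
        unsigned unfrag_len = 40 + 8;        /* IPv6 header + fragment header */
        unsigned length = 4000;              /* payload to be fragmented */

        /* fragmentable bytes per packet, rounded down to a multiple of 8 */
        unsigned frag_len = (mtu - unfrag_len) & ~0x7u;

        unsigned nfrags   = length / frag_len;          /* full-size fragments */
        unsigned last_len = length - nfrags * frag_len; /* what the last one carries */

        if (last_len == 0) {                 /* payload divided evenly: the last */
                last_len = frag_len;         /* full fragment becomes the final one */
                nfrags--;
        }

        printf("frag_len=%u, %u full fragments, last fragment carries %u bytes\n",
               frag_len, nfrags, last_len);
        return 0;
}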
......@@ -821,6 +821,8 @@ static void ip6ip6_tnl_link_config(struct ip6_tnl *t)
else
dev->flags &= ~IFF_POINTOPOINT;
dev->iflink = p->link;
if (p->flags & IP6_TNL_F_CAP_XMIT) {
struct rt6_info *rt = rt6_lookup(&p->raddr, &p->laddr,
p->link, 0);
......@@ -829,8 +831,6 @@ static void ip6ip6_tnl_link_config(struct ip6_tnl *t)
return;
if (rt->rt6i_dev) {
dev->iflink = rt->rt6i_dev->ifindex;
dev->hard_header_len = rt->rt6i_dev->hard_header_len +
sizeof (struct ipv6hdr);
......@@ -1040,7 +1040,6 @@ static void ip6ip6_tnl_dev_setup(struct net_device *dev)
dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
dev->flags |= IFF_NOARP;
dev->iflink = 0;
dev->addr_len = sizeof(struct in6_addr);
}
......
......@@ -181,15 +181,15 @@ void __exit ircomm_tty_cleanup(void)
static int ircomm_tty_startup(struct ircomm_tty_cb *self)
{
notify_t notify;
int ret;
int ret = -ENODEV;
IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
/* Already open */
if (self->flags & ASYNC_INITIALIZED) {
/* Check if already open */
if (test_and_set_bit(ASYNC_B_INITIALIZED, &self->flags)) {
IRDA_DEBUG(2, "%s(), already open so break out!\n", __FUNCTION__ );
return 0;
}
......@@ -213,7 +213,7 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self)
self->line);
}
if (!self->ircomm)
return -ENODEV;
goto err;
self->slsap_sel = self->ircomm->slsap_sel;
......@@ -221,12 +221,13 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self)
ret = ircomm_tty_attach_cable(self);
if (ret < 0) {
ERROR("%s(), error attaching cable!\n", __FUNCTION__);
return ret;
goto err;
}
self->flags |= ASYNC_INITIALIZED;
return 0;
err:
clear_bit(ASYNC_B_INITIALIZED, &self->flags);
return ret;
}
/*
......@@ -299,7 +300,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
current->state = TASK_INTERRUPTIBLE;
if (tty_hung_up_p(filp) || !(self->flags & ASYNC_INITIALIZED)){
if (tty_hung_up_p(filp) ||
!test_bit(ASYNC_B_INITIALIZED, &self->flags)) {
retval = (self->flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS;
break;
......@@ -310,7 +312,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
* specified, we cannot return before the IrCOMM link is
* ready
*/
if (!(self->flags & ASYNC_CLOSING) &&
if (!test_bit(ASYNC_B_CLOSING, &self->flags) &&
(do_clocal || (self->settings.dce & IRCOMM_CD)) &&
self->state == IRCOMM_TTY_READY)
{
......@@ -425,7 +427,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
* If the port is the middle of closing, bail out now
*/
if (tty_hung_up_p(filp) ||
(self->flags & ASYNC_CLOSING)) {
test_bit(ASYNC_B_CLOSING, &self->flags)) {
/* Hm, why are we blocking on ASYNC_CLOSING if we
* do return -EAGAIN/-ERESTARTSYS below anyway?
......@@ -435,7 +437,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
* probably better sleep uninterruptible?
*/
if (wait_event_interruptible(self->close_wait, !(self->flags&ASYNC_CLOSING))) {
if (wait_event_interruptible(self->close_wait, !test_bit(ASYNC_B_CLOSING, &self->flags))) {
WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n",
__FUNCTION__);
return -ERESTARTSYS;
......@@ -530,11 +532,13 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
IRDA_DEBUG(0, "%s(), open count > 0\n", __FUNCTION__ );
return;
}
self->flags |= ASYNC_CLOSING;
/* Hum... Should be test_and_set_bit ??? - Jean II */
set_bit(ASYNC_B_CLOSING, &self->flags);
/* We need to unlock here (we were unlocking at the end of this
* function), because tty_wait_until_sent() may schedule.
* I don't know if the rest should be locked somehow,
* I don't know if the rest should be protected somehow,
* so someone should check. - Jean II */
spin_unlock_irqrestore(&self->spinlock, flags);
......@@ -978,10 +982,12 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self)
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
if (!(self->flags & ASYNC_INITIALIZED))
if (!test_and_clear_bit(ASYNC_B_INITIALIZED, &self->flags))
return;
ircomm_tty_detach_cable(self);
spin_lock_irqsave(&self->spinlock, flags);
del_timer(&self->watchdog_timer);
......@@ -998,13 +1004,10 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self)
self->tx_skb = NULL;
}
ircomm_tty_detach_cable(self);
if (self->ircomm) {
ircomm_close(self->ircomm);
self->ircomm = NULL;
}
self->flags &= ~ASYNC_INITIALIZED;
spin_unlock_irqrestore(&self->spinlock, flags);
}
......
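The IrCOMM TTY driver switches from testing and OR-ing a plain flags word (ASYNC_INITIALIZED, ASYNC_CLOSING masks) to atomic bit operations keyed by bit number (ASYNC_B_INITIALIZED, ASYNC_B_CLOSING), which closes the check-then-set window in ircomm_tty_startup() and pairs with test_and_clear_bit() in shutdown. A userspace sketch of the same idiom built on GCC atomic builtins; the bit numbers and helper names are illustrative, not the kernel's bitops:

#include <stdio.h>

#define B_INITIALIZED 31   /* bit numbers, mirroring the ASYNC_B_* style */
#define B_CLOSING     27

static volatile unsigned long flags;

/* atomically set a bit and return its previous value */
static int test_and_set_bit_ul(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1UL << nr;
        return (__atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

/* atomically clear a bit and return its previous value */
static int test_and_clear_bit_ul(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1UL << nr;
        return (__atomic_fetch_and(addr, ~mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

static void startup(void)
{
        if (test_and_set_bit_ul(B_INITIALIZED, &flags)) {
                printf("already open, nothing to do\n");
                return;
        }
        printf("performing one-time initialisation\n");
}

static void shutdown(void)
{
        if (!test_and_clear_bit_ul(B_INITIALIZED, &flags))
                return;                      /* never initialised */
        printf("tearing down\n");
}

int main(void)
{
        startup();      /* initialises */
        startup();      /* sees the bit already set */
        shutdown();     /* clears it exactly once */
        shutdown();     /* no-op */
        return 0;
}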
......@@ -64,6 +64,7 @@
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
......@@ -1767,61 +1768,86 @@ static struct notifier_block packet_netdev_notifier = {
};
#ifdef CONFIG_PROC_FS
static int packet_read_proc(char *buffer, char **start, off_t offset,
int length, int *eof, void *data)
static inline struct sock *packet_seq_idx(loff_t off)
{
off_t pos=0;
off_t begin=0;
int len=0;
struct sock *s;
struct hlist_node *node;
len+= sprintf(buffer,"sk RefCnt Type Proto Iface R Rmem User Inode\n");
sk_for_each(s, node, &packet_sklist) {
if (!off--)
return s;
}
return NULL;
}
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
{
read_lock(&packet_sklist_lock);
return *pos ? packet_seq_idx(*pos - 1) : SEQ_START_TOKEN;
}
sk_for_each(s, node, &packet_sklist) {
struct packet_opt *po = pkt_sk(s);
len+=sprintf(buffer+len,"%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu",
s,
atomic_read(&s->sk_refcnt),
s->sk_type,
ntohs(po->num),
po->ifindex,
po->running,
atomic_read(&s->sk_rmem_alloc),
sock_i_uid(s),
sock_i_ino(s)
);
buffer[len++]='\n';
pos=begin+len;
if(pos<offset) {
len=0;
begin=pos;
}
if(pos>offset+length)
goto done;
static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN)
? sk_head(&packet_sklist)
: sk_next((struct sock*)v) ;
}
static void packet_seq_stop(struct seq_file *seq, void *v)
{
read_unlock(&packet_sklist_lock);
}
static int packet_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
else {
struct sock *s = v;
const struct packet_opt *po = pkt_sk(s);
seq_printf(seq,
"%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
s,
atomic_read(&s->sk_refcnt),
s->sk_type,
ntohs(po->num),
po->ifindex,
po->running,
atomic_read(&s->sk_rmem_alloc),
sock_i_uid(s),
sock_i_ino(s) );
}
*eof = 1;
done:
read_unlock(&packet_sklist_lock);
*start=buffer+(offset-begin);
len-=(offset-begin);
if(len>length)
len=length;
if(len<0)
len=0;
return len;
return 0;
}
static struct seq_operations packet_seq_ops = {
.start = packet_seq_start,
.next = packet_seq_next,
.stop = packet_seq_stop,
.show = packet_seq_show,
};
static int packet_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &packet_seq_ops);
}
static struct file_operations packet_seq_fops = {
.owner = THIS_MODULE,
.open = packet_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif
static void __exit packet_exit(void)
{
remove_proc_entry("net/packet", 0);
proc_net_remove("packet");
unregister_netdevice_notifier(&packet_netdev_notifier);
sock_unregister(PF_PACKET);
return;
......@@ -1831,9 +1857,8 @@ static int __init packet_init(void)
{
sock_register(&packet_family_ops);
register_netdevice_notifier(&packet_netdev_notifier);
#ifdef CONFIG_PROC_FS
create_proc_read_entry("net/packet", 0, 0, packet_read_proc, NULL);
#endif
proc_net_fops_create("packet", 0, &packet_seq_fops);
return 0;
}
......
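The /proc/net/packet handler is converted from the old read_proc buffer-and-offset juggling to the seq_file interface, where the driver only supplies start/next/stop/show callbacks and the core takes care of buffering, offsets and partial reads. A userspace sketch of that iterator split over a plain array; the record type and table are stand-ins, not the seq_file API itself:

#include <stdio.h>

#define SEQ_START_TOKEN ((void *)1)   /* same trick the kernel uses for the header row */

struct entry { const char *name; int value; };   /* stand-in record type */

static struct entry table[] = {
        { "alpha", 1 }, { "beta", 2 }, { "gamma", 3 },
};
static const long table_len = sizeof(table) / sizeof(table[0]);

/* ->start: take any needed lock and position the iterator */
static void *seq_start(long *pos)
{
        if (*pos == 0)
                return SEQ_START_TOKEN;
        return (*pos <= table_len) ? &table[*pos - 1] : NULL;
}

/* ->next: advance; returning NULL ends the walk */
static void *seq_next(void *v, long *pos)
{
        long idx = (v == SEQ_START_TOKEN) ? 0 : (struct entry *)v - table + 1;

        ++*pos;
        return (idx < table_len) ? &table[idx] : NULL;
}

/* ->stop: drop the lock taken in start (nothing to do in this sketch) */
static void seq_stop(void *v) { (void)v; }

/* ->show: format exactly one record, or the header for SEQ_START_TOKEN */
static void seq_show(void *v)
{
        if (v == SEQ_START_TOKEN) {
                printf("%-8s %s\n", "name", "value");
        } else {
                struct entry *e = v;
                printf("%-8s %d\n", e->name, e->value);
        }
}

int main(void)
{
        long pos = 0;
        void *v;

        for (v = seq_start(&pos); v != NULL; v = seq_next(v, &pos))
                seq_show(v);
        seq_stop(v);
        return 0;
}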
......@@ -74,7 +74,7 @@
#define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
#define HTB_VER 0x3000e /* major must be matched with number suplied by TC as version */
#define HTB_VER 0x3000f /* major must be matched with number suplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
......@@ -308,7 +308,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
rules in it */
if (skb->priority == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) selected */
if ((cl = htb_find(skb->priority,sch)) != NULL)
if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
return cl;
tcf = q->filter_list;
......