Commit b5b2042e authored by Linus Torvalds

Merge master.kernel.org:/home/davem/BK/net-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents bae9e1dd 651c8f88
......@@ -1099,20 +1099,17 @@ M: James.Bottomley@HansenPartnership.com
L: linux-scsi@vger.kernel.org
S: Maintained
NETFILTER
NETFILTER/IPTABLES
P: Rusty Russell
M: rusty@rustcorp.com.au
P: Marc Boucher
M: marc@mbsi.ca
P: James Morris
M: jamesm@intercode.com.au
P: Harald Welte
M: laforge@gnumonks.org
P: Jozsef Kadlecsik
M: kadlec@blackhole.kfki.hu
M: coreteam@netfilter.org
W: http://www.netfilter.org/
W: http://www.iptables.org/
L: netfilter@lists.samba.org
L: netfilter@lists.netfilter.org
L: netfilter-devel@lists.netfilter.org
S: Supported
NETROM NETWORK LAYER
......
......@@ -1388,6 +1388,25 @@ static void
ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
if (skb->len >= 2) {
struct sk_buff *new_skb;
/* If this packet is byte aligned, fix that. */
if ((unsigned long)skb->data & 0x1UL) {
int len = skb->len;
if (skb_tailroom(skb) < 124)
len += 128;
new_skb = dev_alloc_skb(len);
if (!new_skb) {
printk(KERN_ERR"PPP: no memory (bad aligned SKB)\n");
goto err;
}
skb_reserve(new_skb, 2);
memcpy(skb_put(new_skb, skb->len), skb->data, skb->len);
kfree_skb(skb);
skb = new_skb;
}
#ifdef CONFIG_PPP_MULTILINK
/* XXX do channel-level decompression here */
if (PPP_PROTO(skb) == PPP_MP)
......@@ -1401,7 +1420,7 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
if (skb->len > 0)
/* note: a 0-length skb is used as an error indication */
++ppp->stats.rx_length_errors;
err:
kfree_skb(skb);
ppp_receive_error(ppp);
}
......
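The ppp_generic.c hunk above copies a receive buffer whose data pointer starts on an odd address into a freshly allocated skb, so later protocol parsing sees halfword-aligned data; on failure the frame is dropped via the new err: label. Below is a minimal userspace sketch of the same pattern; the realign_buf() helper and the buffer names are hypothetical stand-ins for the dev_alloc_skb() + skb_reserve(2) + memcpy() sequence, not part of the kernel change.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical userspace analogue: if buf does not start on an even
 * address, copy it into a fresh allocation that does, mirroring the
 * alignment fixup added to ppp_receive_frame() above. */
static unsigned char *realign_buf(unsigned char *buf, size_t len)
{
	unsigned char *copy;

	if (((uintptr_t)buf & 0x1UL) == 0)
		return buf;		/* already halfword aligned */

	copy = malloc(len);
	if (!copy)
		return NULL;		/* caller drops the frame, as the driver does */
	memcpy(copy, buf, len);		/* malloc() memory is suitably aligned */
	return copy;
}

int main(void)
{
	unsigned char raw[16] = "0123456789abcde";
	unsigned char *odd = raw + 1;	/* deliberately misaligned start */
	unsigned char *fixed = realign_buf(odd, 15);

	printf("odd=%p fixed=%p\n", (void *)odd, (void *)fixed);
	if (fixed && fixed != odd)
		free(fixed);
	return 0;
}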
......@@ -115,7 +115,7 @@ extern void ip_mc_init_dev(struct in_device *);
extern void ip_mc_destroy_dev(struct in_device *);
extern void ip_mc_up(struct in_device *);
extern void ip_mc_down(struct in_device *);
extern int ip_mc_dec_group(struct in_device *in_dev, u32 addr);
extern void ip_mc_dec_group(struct in_device *in_dev, u32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, u32 addr);
#endif
#endif
......@@ -27,6 +27,7 @@
#include <linux/config.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <net/checksum.h>
#include <net/sock.h>
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
......@@ -120,8 +121,7 @@ extern struct tcp_hashinfo {
* Now align to a new cache line as all the following members
* are often dirty.
*/
rwlock_t __tcp_lhash_lock
__attribute__((__aligned__(SMP_CACHE_BYTES)));
rwlock_t __tcp_lhash_lock ____cacheline_aligned;
atomic_t __tcp_lhash_users;
wait_queue_head_t __tcp_lhash_wait;
spinlock_t __tcp_portalloc_lock;
......
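____cacheline_aligned comes from the newly included <linux/cache.h> and expands to the same __attribute__((__aligned__(SMP_CACHE_BYTES))) annotation that the tcp.h hunk replaces, so the struct layout is unchanged; only the spelling is. A userspace sketch of the idea, assuming a 64-byte cache line and hypothetical demo_* names in place of the tcp_hashinfo fields:

#include <stddef.h>
#include <stdio.h>

#define DEMO_CACHE_BYTES 64	/* assumption; the kernel uses SMP_CACHE_BYTES */
#define demo_cacheline_aligned __attribute__((__aligned__(DEMO_CACHE_BYTES)))

/* Read-mostly fields first; the frequently dirtied lock then starts a new
 * cache line, as the comment above __tcp_lhash_lock describes. */
struct demo_hashinfo {
	int ehash_size;
	int bhash_size;
	int lhash_lock demo_cacheline_aligned;
	int lhash_users;
};

int main(void)
{
	printf("lhash_lock offset = %zu (struct size = %zu)\n",
	       offsetof(struct demo_hashinfo, lhash_lock),
	       sizeof(struct demo_hashinfo));
	return 0;
}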
......@@ -5,8 +5,7 @@
*
*/
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
......
......@@ -530,9 +530,8 @@ void ip_mc_inc_group(struct in_device *in_dev, u32 addr)
* A socket has left a multicast group on device dev
*/
int ip_mc_dec_group(struct in_device *in_dev, u32 addr)
void ip_mc_dec_group(struct in_device *in_dev, u32 addr)
{
int err = -ESRCH;
struct ip_mc_list *i, **ip;
ASSERT_RTNL();
......@@ -549,13 +548,11 @@ int ip_mc_dec_group(struct in_device *in_dev, u32 addr)
ip_rt_multicast_event(in_dev);
ip_ma_put(i);
return 0;
return;
}
err = 0;
break;
}
}
return -ESRCH;
}
/* Device going down */
......
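The igmp.h and igmp.c hunks change ip_mc_dec_group() from returning an int error code to void: the list walk now simply returns once the matching group has been found and, if its user count hits zero, unlinked. A standalone sketch of that pattern with hypothetical names (demo_group, demo_dec_group) standing in for struct ip_mc_list and the RTNL-protected list:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct ip_mc_list and its users count. */
struct demo_group {
	unsigned int addr;
	int users;
	struct demo_group *next;
};

/* Mirrors the new void ip_mc_dec_group(): drop a reference, unlink the
 * entry when the last user goes away, and return without an error code. */
static void demo_dec_group(struct demo_group **head, unsigned int addr)
{
	struct demo_group *i, **ip;

	for (ip = head; (i = *ip) != NULL; ip = &i->next) {
		if (i->addr != addr)
			continue;
		if (--i->users == 0) {
			*ip = i->next;	/* analogous to *ip = i->next under the lock */
			free(i);	/* analogous to ip_ma_put(i) */
		}
		return;
	}
}

int main(void)
{
	struct demo_group *head = calloc(1, sizeof(*head));

	head->addr = 0xe0000001;	/* 224.0.0.1 */
	head->users = 1;
	demo_dec_group(&head, 0xe0000001);
	printf("head after removal: %p\n", (void *)head);
	return 0;
}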
......@@ -382,9 +382,9 @@ int ip_nat_helper_register(struct ip_nat_helper *me)
const char *tmp = me->me->name;
if (strlen(tmp) + 6 > MODULE_MAX_NAMELEN) {
printk(__FUNCTION__ ": unable to "
printk("%s: unable to "
"compute conntrack helper name "
"from %s\n", tmp);
"from %s\n", __FUNCTION__, tmp);
return -EBUSY;
}
tmp += 6;
......@@ -467,7 +467,8 @@ void ip_nat_helper_unregister(struct ip_nat_helper *me)
&& ct_helper->me) {
__MOD_DEC_USE_COUNT(ct_helper->me);
} else
printk(__FUNCTION__ ": unable to decrement usage count"
" of conntrack helper %s\n", me->me->name);
printk("%s: unable to decrement usage count"
" of conntrack helper %s\n",
__FUNCTION__, me->me->name);
}
}
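Both ip_nat_helper.c hunks (and the inet6_init() hunk further down) stop pasting __FUNCTION__ onto the format string: newer gcc treats __FUNCTION__ as a predefined identifier rather than a string literal, so it cannot be concatenated and must be passed through a "%s" conversion instead. A small userspace printf() illustration, with a hypothetical demo_register() in place of the helper registration code:

#include <stdio.h>

static int demo_register(const char *name)
{
	/* old style (string concatenation, no longer accepted by gcc):
	 *   printf(__FUNCTION__ ": unable to handle %s\n", name);
	 */
	printf("%s: unable to handle %s\n", __FUNCTION__, name);
	return -1;
}

int main(void)
{
	demo_register("ftp");
	return 0;
}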
......@@ -1779,4 +1779,4 @@ int ipfw_init_or_cleanup(int init)
#endif
return ret;
}
MODULE_LICENSE("BSD without advertisement clause");
MODULE_LICENSE("Dual BSD/GPL");
......@@ -156,7 +156,7 @@
#define dprint_ip(a)
#endif
static rwlock_t ip_fw_lock = RW_LOCK_UNLOCKED;
static DECLARE_RWLOCK(ip_fw_lock);
#if defined(CONFIG_IP_ACCT) || defined(CONFIG_IP_FIREWALL)
......
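DECLARE_RWLOCK() comes from the netfilter lockhelp.h header (included by ipt_ULOG.c above) and bundles the lock definition with its initializer, so a debugging build can substitute an instrumented variant without touching each definition site. A userspace sketch of the same definition-plus-initializer idiom using POSIX rwlocks; the DEMO_DECLARE_RWLOCK macro and demo_fw_lock name are illustrative only:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical analogue of DECLARE_RWLOCK(name): one macro defines the
 * lock and supplies its static initializer. */
#define DEMO_DECLARE_RWLOCK(name) \
	pthread_rwlock_t name = PTHREAD_RWLOCK_INITIALIZER

static DEMO_DECLARE_RWLOCK(demo_fw_lock);

int main(void)
{
	pthread_rwlock_rdlock(&demo_fw_lock);	/* readers may share the lock */
	printf("read-locked demo_fw_lock\n");
	pthread_rwlock_unlock(&demo_fw_lock);
	return 0;
}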
......@@ -10,6 +10,8 @@
* nlgroup now global (sysctl)
* 2001/04/19 ulog-queue reworked, now fixed buffer size specified at
* module loadtime -HW
* 2002/07/07 remove broken nflog_rcv() function -HW
* 2002/08/29 fix shifted/unshifted nlgroup bug -HW
*
* Released under the terms of the GPL
*
......@@ -29,7 +31,7 @@
* Specify, after how many clock ticks (intel: 100 per second) the queue
* should be flushed even if it is not full yet.
*
* ipt_ULOG.c,v 1.18 2002/04/16 07:33:00 laforge Exp
* ipt_ULOG.c,v 1.21 2002/08/29 10:54:34 laforge Exp
*/
#include <linux/module.h>
......@@ -48,8 +50,11 @@
#include <linux/netfilter_ipv4/ipt_ULOG.h>
#include <linux/netfilter_ipv4/lockhelp.h>
#include <net/sock.h>
#include <asm/bitops.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
MODULE_DESCRIPTION("IP tables userspace logging module");
#define ULOG_NL_EVENT 111 /* Harald's favorite number */
#define ULOG_MAXNLGROUPS 32 /* number of nlgroups */
......@@ -63,10 +68,6 @@ MODULE_LICENSE("GPL");
#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format, ## args); } while (0)
MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
MODULE_DESCRIPTION("IP tables userspace logging module");
static unsigned int nlbufsiz = 4096;
MODULE_PARM(nlbufsiz, "i");
MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
......@@ -91,9 +92,9 @@ static size_t qlen; /* current length of multipart-nlmsg */
DECLARE_LOCK(ulog_lock); /* spinlock */
/* send one ulog_buff_t to userspace */
static void ulog_send(unsigned int nlgroup)
static void ulog_send(unsigned int nlgroupnum)
{
ulog_buff_t *ub = &ulog_buffers[nlgroup];
ulog_buff_t *ub = &ulog_buffers[nlgroupnum];
if (timer_pending(&ub->timer)) {
DEBUGP("ipt_ULOG: ulog_send: timer was pending, deleting\n");
......@@ -104,10 +105,10 @@ static void ulog_send(unsigned int nlgroup)
if (ub->qlen > 1)
ub->lastnlh->nlmsg_type = NLMSG_DONE;
NETLINK_CB(ub->skb).dst_groups = nlgroup;
NETLINK_CB(ub->skb).dst_groups = (1 << nlgroupnum);
DEBUGP("ipt_ULOG: throwing %d packets to netlink mask %u\n",
ub->qlen, nlgroup);
netlink_broadcast(nflognl, ub->skb, 0, nlgroup, GFP_ATOMIC);
netlink_broadcast(nflognl, ub->skb, 0, (1 << nlgroupnum), GFP_ATOMIC);
ub->qlen = 0;
ub->skb = NULL;
......@@ -128,11 +129,6 @@ static void ulog_timer(unsigned long data)
UNLOCK_BH(&ulog_lock);
}
static void nflog_rcv(struct sock *sk, int len)
{
printk("ipt_ULOG:nflog_rcv() did receive netlink message ?!?\n");
}
struct sk_buff *ulog_alloc_skb(unsigned int size)
{
struct sk_buff *skb;
......@@ -169,6 +165,11 @@ static unsigned int ipt_ulog_target(struct sk_buff **pskb,
struct nlmsghdr *nlh;
struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo;
/* ffs == find first bit set, necessary because userspace
* is already shifting groupnumber, but we need unshifted.
* ffs() returns [1..32], we need [0..31] */
unsigned int groupnum = ffs(loginfo->nl_group) - 1;
/* calculate the size of the skb needed */
if ((loginfo->copy_range == 0) ||
(loginfo->copy_range > (*pskb)->len)) {
......@@ -179,7 +180,7 @@ static unsigned int ipt_ulog_target(struct sk_buff **pskb,
size = NLMSG_SPACE(sizeof(*pm) + copy_len);
ub = &ulog_buffers[loginfo->nl_group];
ub = &ulog_buffers[groupnum];
LOCK_BH(&ulog_lock);
......@@ -191,7 +192,7 @@ static unsigned int ipt_ulog_target(struct sk_buff **pskb,
/* either the queue len is too high or we don't have
* enough room in nlskb left. send it to userspace. */
ulog_send(loginfo->nl_group);
ulog_send(groupnum);
if (!(ub->skb = ulog_alloc_skb(size)))
goto alloc_failure;
......@@ -325,7 +326,7 @@ static int __init init(void)
ulog_buffers[i].timer.data = i;
}
nflognl = netlink_kernel_create(NETLINK_NFLOG, nflog_rcv);
nflognl = netlink_kernel_create(NETLINK_NFLOG, NULL);
if (!nflognl)
return -ENOMEM;
......
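The core of the ipt_ULOG fix: userspace already hands the module nl_group as a shifted bitmask (1 << group), so ipt_ulog_target() now recovers the 0..31 group index with ffs() before indexing ulog_buffers[], and ulog_send() shifts it back when filling in the netlink destination mask. A standalone sketch of just that arithmetic, with hypothetical variable names:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int nl_group = 1 << 5;		/* shifted bitmask from userspace */

	/* ffs() returns the 1-based index of the lowest set bit, so subtract
	 * one to get the buffer index, as the new code does. */
	unsigned int groupnum = ffs(nl_group) - 1;

	/* The netlink destination mask wants the shifted form back. */
	unsigned int dst_groups = 1 << groupnum;

	printf("nl_group=%#x groupnum=%u dst_groups=%#x\n",
	       nl_group, groupnum, dst_groups);
	return 0;
}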
......@@ -663,8 +663,8 @@ static int __init inet6_init(void)
sizeof(struct raw6_sock), 0,
SLAB_HWCACHE_ALIGN, 0, 0);
if (!tcp6_sk_cachep || !udp6_sk_cachep || !raw6_sk_cachep)
printk(KERN_CRIT __FUNCTION__
": Can't create protocol sock SLAB caches!\n");
printk(KERN_CRIT "%s: Can't create protocol sock SLAB "
"caches!\n", __FUNCTION__);
/* Register the socket-side information for inet6_create. */
for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
......
......@@ -1595,7 +1595,7 @@ int sctp_process_param(sctp_association_t *asoc, sctpParam_t param,
case SCTP_PARAM_STATE_COOKIE:
asoc->peer.cookie_len =
ntohs(param.p->length) =
ntohs(param.p->length) -
sizeof(sctp_paramhdr_t);
asoc->peer.cookie = param.cookie->body;
break;
......
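The SCTP hunk is a one-character fix: the peer cookie length is the TLV parameter length minus the parameter header size, not an assignment into the header field. A tiny sketch of that arithmetic, using a hypothetical demo_paramhdr in place of sctp_paramhdr_t and a hand-built big-endian length field:

#include <arpa/inet.h>	/* htons(), ntohs() */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for sctp_paramhdr_t: type and length are big-endian
 * on the wire, and length covers the header plus the value bytes. */
struct demo_paramhdr {
	uint16_t type;
	uint16_t length;
};

int main(void)
{
	struct demo_paramhdr hdr;
	size_t cookie_len;

	hdr.type = htons(7);			/* state cookie parameter */
	hdr.length = htons(sizeof(hdr) + 40);	/* header + 40 cookie bytes */

	/* The fix: subtract the header size rather than assigning to it. */
	cookie_len = ntohs(hdr.length) - sizeof(struct demo_paramhdr);
	printf("cookie body length = %zu bytes\n", cookie_len);
	return 0;
}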