Commit 16fb62b6 authored by David S. Miller
parents a3433f35 f9181f4f
@@ -8,6 +8,7 @@ header-y += xt_CONNMARK.h
 header-y += xt_CONNSECMARK.h
 header-y += xt_CT.h
 header-y += xt_DSCP.h
+header-y += xt_IDLETIMER.h
 header-y += xt_LED.h
 header-y += xt_MARK.h
 header-y += xt_NFLOG.h
...
@@ -76,6 +76,10 @@ enum ip_conntrack_status {
         /* Conntrack is a template */
         IPS_TEMPLATE_BIT = 11,
         IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT),
+
+        /* Conntrack is a fake untracked entry */
+        IPS_UNTRACKED_BIT = 12,
+        IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
 };

 /* Connection tracking event types */
...
@@ -89,6 +89,7 @@ enum nfulnl_attr_config {
 #define NFULNL_COPY_NONE	0x00
 #define NFULNL_COPY_META	0x01
 #define NFULNL_COPY_PACKET	0x02
+#define NFULNL_COPY_DISABLED	0x03

 #define NFULNL_CFG_F_SEQ	0x0001
 #define NFULNL_CFG_F_SEQ_GLOBAL	0x0002
...
/*
* linux/include/linux/netfilter/xt_IDLETIMER.h
*
* Header file for Xtables timer target module.
*
* Copyright (C) 2004, 2010 Nokia Corporation
* Written by Timo Teras <ext-timo.teras@nokia.com>
*
* Converted to x_tables and forward-ported to 2.6.34
* by Luciano Coelho <luciano.coelho@nokia.com>
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#ifndef _XT_IDLETIMER_H
#define _XT_IDLETIMER_H

#include <linux/types.h>

#define MAX_IDLETIMER_LABEL_SIZE 28

struct idletimer_tg_info {
        __u32 timeout;

        char label[MAX_IDLETIMER_LABEL_SIZE];

        /* for kernel module internal use only */
        struct idletimer_tg *timer __attribute((aligned(8)));
};

#endif
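The kernel-internal pointer above carries __attribute((aligned(8))) so the structure has the same size and field offsets under 32-bit and 64-bit ABIs, which spares the module a compat translation when a 32-bit iptables talks to a 64-bit kernel. A minimal sketch of that invariant, using a hypothetical mirror struct and a C11 static assert (not part of the patch):

        /* 4 bytes (timeout) + 28 bytes (label) put the pointer at offset 32,
         * which is already 8-byte aligned; aligned(8) also pads the struct
         * to 40 bytes on 32-bit, matching the 64-bit layout. */
        struct idletimer_tg_info_sketch {
                unsigned int timeout;
                char label[28];
                void *timer __attribute__((aligned(8)));
        };
        _Static_assert(sizeof(struct idletimer_tg_info_sketch) == 40,
                       "layout must match between 32- and 64-bit ABIs");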
@@ -257,7 +257,12 @@ extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
                                               u32 seq);

 /* Fake conntrack entry for untracked connections */
-extern struct nf_conn nf_conntrack_untracked;
+DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+static inline struct nf_conn *nf_ct_untracked_get(void)
+{
+        return &__raw_get_cpu_var(nf_conntrack_untracked);
+}
+extern void nf_ct_untracked_status_or(unsigned long bits);

 /* Iterate over all conntracks: if iter returns true, it's deleted. */
 extern void
@@ -285,9 +290,9 @@ static inline int nf_ct_is_dying(struct nf_conn *ct)
         return test_bit(IPS_DYING_BIT, &ct->status);
 }

-static inline int nf_ct_is_untracked(const struct sk_buff *skb)
+static inline int nf_ct_is_untracked(const struct nf_conn *ct)
 {
-        return (skb->nfct == &nf_conntrack_untracked.ct_general);
+        return test_bit(IPS_UNTRACKED_BIT, &ct->status);
 }

 extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
...
@@ -60,7 +60,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
         struct nf_conn *ct = (struct nf_conn *)skb->nfct;
         int ret = NF_ACCEPT;

-        if (ct && ct != &nf_conntrack_untracked) {
+        if (ct && !nf_ct_is_untracked(ct)) {
                 if (!nf_ct_is_confirmed(ct))
                         ret = __nf_conntrack_confirm(skb);
                 if (likely(ret == NF_ACCEPT))
...
@@ -2,13 +2,17 @@
 #define _XT_RATEEST_H

 struct xt_rateest {
+        /* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
+        struct gnet_stats_basic_packed	bstats;
+        spinlock_t			lock;
+        /* keep rstats and lock on same cache line to speedup xt_rateest_mt() */
+        struct gnet_stats_rate_est	rstats;
+
+        /* following fields not accessed in hot path */
         struct hlist_node		list;
         char				name[IFNAMSIZ];
         unsigned int			refcnt;
-        spinlock_t			lock;
         struct gnet_estimator		params;
-        struct gnet_stats_rate_est	rstats;
-        struct gnet_stats_basic_packed	bstats;
         struct rcu_head			rcu;
 };
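The reorder above changes layout, not behaviour: the RATEEST target dirties only lock and bstats for every packet, while the rateest match reads rstats, so grouping those pairs keeps each hot path inside one or two cache lines instead of spanning the whole struct. A sketch of the target-side accounting this helps, simplified from xt_RATEEST.c (illustrative, not part of this diff):

        static void rateest_account(struct xt_rateest *est, unsigned int pkt_len)
        {
                /* lock and bstats now share a cache line */
                spin_lock_bh(&est->lock);
                est->bstats.bytes += pkt_len;
                est->bstats.packets++;
                spin_unlock_bh(&est->lock);
        }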
...
@@ -245,8 +245,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
                         kfree_skb(skb);
                         return 0;
                 }
-                dst_hold(&rt->dst);
-                skb_dst_set(skb, &rt->dst);
+                skb_dst_set_noref(skb, &rt->dst);

         skb->dev = nf_bridge->physindev;
         nf_bridge_update_protocol(skb);
@@ -397,8 +396,7 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
                                 kfree_skb(skb);
                                 return 0;
                         }
-                        dst_hold(&rt->dst);
-                        skb_dst_set(skb, &rt->dst);
+                        skb_dst_set_noref(skb, &rt->dst);
                 }
                 skb->dev = nf_bridge->physindev;
...
@@ -212,9 +212,7 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
                 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
                                                skb->len - dataoff, 0);
                 skb->ip_summed = CHECKSUM_NONE;
-                csum = __skb_checksum_complete_head(skb, dataoff + len);
-                if (!csum)
-                        skb->ip_summed = CHECKSUM_UNNECESSARY;
+                return __skb_checksum_complete_head(skb, dataoff + len);
         }
         return csum;
 }
...
@@ -758,7 +758,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
          * about).
          */
         countersize = sizeof(struct xt_counters) * private->number;
-        counters = vmalloc_node(countersize, numa_node_id());
+        counters = vmalloc(countersize);

         if (counters == NULL)
                 return ERR_PTR(-ENOMEM);
@@ -1005,8 +1005,7 @@ static int __do_replace(struct net *net, const char *name,
         struct arpt_entry *iter;

         ret = 0;
-        counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
-                                numa_node_id());
+        counters = vmalloc(num_counters * sizeof(struct xt_counters));
         if (!counters) {
                 ret = -ENOMEM;
                 goto out;
@@ -1159,7 +1158,7 @@ static int do_add_counters(struct net *net, const void __user *user,
         if (len != size + num_counters * sizeof(struct xt_counters))
                 return -EINVAL;

-        paddc = vmalloc_node(len - size, numa_node_id());
+        paddc = vmalloc(len - size);
         if (!paddc)
                 return -ENOMEM;
...
@@ -42,7 +42,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);

 static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
 static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_RWLOCK(queue_lock);
+static DEFINE_SPINLOCK(queue_lock);
 static int peer_pid __read_mostly;
 static unsigned int copy_range __read_mostly;
 static unsigned int queue_total;
@@ -72,10 +72,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
                 break;

         case IPQ_COPY_PACKET:
-                copy_mode = mode;
+                if (range > 0xFFFF)
+                        range = 0xFFFF;
                 copy_range = range;
-                if (copy_range > 0xFFFF)
-                        copy_range = 0xFFFF;
+                copy_mode = mode;
                 break;

         default:
@@ -101,7 +101,7 @@ ipq_find_dequeue_entry(unsigned long id)
 {
         struct nf_queue_entry *entry = NULL, *i;

-        write_lock_bh(&queue_lock);
+        spin_lock_bh(&queue_lock);

         list_for_each_entry(i, &queue_list, list) {
                 if ((unsigned long)i == id) {
@@ -115,7 +115,7 @@ ipq_find_dequeue_entry(unsigned long id)
                 queue_total--;
         }

-        write_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);
         return entry;
 }
@@ -136,9 +136,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 static void
 ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 {
-        write_lock_bh(&queue_lock);
+        spin_lock_bh(&queue_lock);
         __ipq_flush(cmpfn, data);
-        write_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);
 }

 static struct sk_buff *
@@ -152,9 +152,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
         struct nlmsghdr *nlh;
         struct timeval tv;

-        read_lock_bh(&queue_lock);
-        switch (copy_mode) {
+        switch (ACCESS_ONCE(copy_mode)) {
         case IPQ_COPY_META:
         case IPQ_COPY_NONE:
                 size = NLMSG_SPACE(sizeof(*pmsg));
@@ -162,26 +160,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
         case IPQ_COPY_PACKET:
                 if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
-                    (*errp = skb_checksum_help(entry->skb))) {
-                        read_unlock_bh(&queue_lock);
+                    (*errp = skb_checksum_help(entry->skb)))
                         return NULL;
-                }
-                if (copy_range == 0 || copy_range > entry->skb->len)
+
+                data_len = ACCESS_ONCE(copy_range);
+                if (data_len == 0 || data_len > entry->skb->len)
                         data_len = entry->skb->len;
-                else
-                        data_len = copy_range;

                 size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
                 break;

         default:
                 *errp = -EINVAL;
-                read_unlock_bh(&queue_lock);
                 return NULL;
         }

-        read_unlock_bh(&queue_lock);
-
         skb = alloc_skb(size, GFP_ATOMIC);
         if (!skb)
                 goto nlmsg_failure;
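The hunk above removes the read lock from the packet path entirely. That is safe because __ipq_set_mode() now clamps range to 0xFFFF before storing it, so any value a lockless reader observes is already bounded, and ACCESS_ONCE() turns each access into a single volatile load. A sketch of the primitive and the snapshot pattern (the macro matches its definition in linux/compiler.h of that era; the helper function is illustrative):

        #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

        /* One load of copy_range; both tests then operate on the same
         * snapshot even if ipq_set_mode() rewrites it concurrently. */
        static unsigned int snapshot_copy_len(unsigned int skb_len)
        {
                unsigned int data_len = ACCESS_ONCE(copy_range);

                if (data_len == 0 || data_len > skb_len)
                        data_len = skb_len;
                return data_len;
        }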
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
         if (nskb == NULL)
                 return status;

-        write_lock_bh(&queue_lock);
+        spin_lock_bh(&queue_lock);

         if (!peer_pid)
                 goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)

         __ipq_enqueue_entry(entry);

-        write_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);
         return status;

 err_out_free_nskb:
         kfree_skb(nskb);

 err_out_unlock:
-        write_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);
         return status;
 }
@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
 {
         int status;

-        write_lock_bh(&queue_lock);
+        spin_lock_bh(&queue_lock);
         status = __ipq_set_mode(mode, range);
-        write_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);
         return status;
 }
@@ -440,11 +433,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
         if (security_netlink_recv(skb, CAP_NET_ADMIN))
                 RCV_SKB_FAIL(-EPERM);

-        write_lock_bh(&queue_lock);
+        spin_lock_bh(&queue_lock);

         if (peer_pid) {
                 if (peer_pid != pid) {
-                        write_unlock_bh(&queue_lock);
+                        spin_unlock_bh(&queue_lock);
                         RCV_SKB_FAIL(-EBUSY);
                 }
         } else {
@@ -452,7 +445,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
                 peer_pid = pid;
         }

-        write_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);

         status = ipq_receive_peer(NLMSG_DATA(nlh), type,
                                   nlmsglen - NLMSG_LENGTH(0));
@@ -497,10 +490,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
         struct netlink_notify *n = ptr;

         if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
-                write_lock_bh(&queue_lock);
+                spin_lock_bh(&queue_lock);
                 if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
                         __ipq_reset();
-                write_unlock_bh(&queue_lock);
+                spin_unlock_bh(&queue_lock);
         }
         return NOTIFY_DONE;
 }
@@ -527,7 +520,7 @@ static ctl_table ipq_table[] = {
 #ifdef CONFIG_PROC_FS
 static int ip_queue_show(struct seq_file *m, void *v)
 {
-        read_lock_bh(&queue_lock);
+        spin_lock_bh(&queue_lock);

         seq_printf(m,
                    "Peer PID          : %d\n"
@@ -545,7 +538,7 @@ static int ip_queue_show(struct seq_file *m, void *v)
                    queue_dropped,
                    queue_user_dropped);

-        read_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);
         return 0;
 }
...
@@ -928,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
            (other than comefrom, which userspace doesn't care
            about). */
         countersize = sizeof(struct xt_counters) * private->number;
-        counters = vmalloc_node(countersize, numa_node_id());
+        counters = vmalloc(countersize);

         if (counters == NULL)
                 return ERR_PTR(-ENOMEM);
@@ -1352,7 +1352,7 @@ do_add_counters(struct net *net, const void __user *user,
         if (len != size + num_counters * sizeof(struct xt_counters))
                 return -EINVAL;

-        paddc = vmalloc_node(len - size, numa_node_id());
+        paddc = vmalloc(len - size);
         if (!paddc)
                 return -ENOMEM;
...
@@ -53,12 +53,13 @@ struct clusterip_config {
 #endif
         enum clusterip_hashmode hash_mode;	/* which hashing mode */
         u_int32_t hash_initval;			/* hash initialization */
+        struct rcu_head rcu;
 };

 static LIST_HEAD(clusterip_configs);

 /* clusterip_lock protects the clusterip_configs list */
-static DEFINE_RWLOCK(clusterip_lock);
+static DEFINE_SPINLOCK(clusterip_lock);

 #ifdef CONFIG_PROC_FS
 static const struct file_operations clusterip_proc_fops;
@@ -71,11 +72,17 @@ clusterip_config_get(struct clusterip_config *c)
         atomic_inc(&c->refcount);
 }

+static void clusterip_config_rcu_free(struct rcu_head *head)
+{
+        kfree(container_of(head, struct clusterip_config, rcu));
+}
+
 static inline void
 clusterip_config_put(struct clusterip_config *c)
 {
         if (atomic_dec_and_test(&c->refcount))
-                kfree(c);
+                call_rcu_bh(&c->rcu, clusterip_config_rcu_free);
 }

 /* decrease the count of entries using/referencing this config.  If last
@@ -84,10 +91,11 @@ clusterip_config_put(struct clusterip_config *c)
 static inline void
 clusterip_config_entry_put(struct clusterip_config *c)
 {
-        write_lock_bh(&clusterip_lock);
-        if (atomic_dec_and_test(&c->entries)) {
-                list_del(&c->list);
-                write_unlock_bh(&clusterip_lock);
+        local_bh_disable();
+        if (atomic_dec_and_lock(&c->entries, &clusterip_lock)) {
+                list_del_rcu(&c->list);
+                spin_unlock(&clusterip_lock);
+                local_bh_enable();

                 dev_mc_del(c->dev, c->clustermac);
                 dev_put(c->dev);
@@ -100,7 +108,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
 #endif
                 return;
         }
-        write_unlock_bh(&clusterip_lock);
+        local_bh_enable();
 }

 static struct clusterip_config *
@@ -108,7 +116,7 @@ __clusterip_config_find(__be32 clusterip)
 {
         struct clusterip_config *c;

-        list_for_each_entry(c, &clusterip_configs, list) {
+        list_for_each_entry_rcu(c, &clusterip_configs, list) {
                 if (c->clusterip == clusterip)
                         return c;
         }
@@ -121,16 +129,15 @@ clusterip_config_find_get(__be32 clusterip, int entry)
 {
         struct clusterip_config *c;

-        read_lock_bh(&clusterip_lock);
+        rcu_read_lock_bh();
         c = __clusterip_config_find(clusterip);
-        if (!c) {
-                read_unlock_bh(&clusterip_lock);
-                return NULL;
+        if (c) {
+                if (unlikely(!atomic_inc_not_zero(&c->refcount)))
+                        c = NULL;
+                else if (entry)
+                        atomic_inc(&c->entries);
         }
-        atomic_inc(&c->refcount);
-        if (entry)
-                atomic_inc(&c->entries);
-        read_unlock_bh(&clusterip_lock);
+        rcu_read_unlock_bh();

         return c;
 }
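clusterip_config_find_get() above follows the standard RCU-plus-refcount idiom: entries are unlinked with list_del_rcu() and freed through call_rcu_bh(), so a reader inside rcu_read_lock_bh() may still see an object whose last reference is already gone, and atomic_inc_not_zero() refuses to resurrect it. The generic shape of the pattern, sketched with a hypothetical obj type:

        struct obj {
                struct list_head list;
                atomic_t refcount;
                struct rcu_head rcu;
        };

        static struct obj *obj_find_get(struct list_head *head,
                                        bool (*match)(const struct obj *))
        {
                struct obj *o;

                rcu_read_lock_bh();
                list_for_each_entry_rcu(o, head, list) {
                        if (!match(o))
                                continue;
                        /* a 0 -> 1 transition would revive a dying object */
                        if (!atomic_inc_not_zero(&o->refcount))
                                o = NULL;
                        goto out;
                }
                o = NULL;
        out:
                rcu_read_unlock_bh();
                return o;
        }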
@@ -181,9 +188,9 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
         }
 #endif

-        write_lock_bh(&clusterip_lock);
-        list_add(&c->list, &clusterip_configs);
-        write_unlock_bh(&clusterip_lock);
+        spin_lock_bh(&clusterip_lock);
+        list_add_rcu(&c->list, &clusterip_configs);
+        spin_unlock_bh(&clusterip_lock);

         return c;
 }
@@ -733,6 +740,9 @@ static void __exit clusterip_tg_exit(void)
 #endif
         nf_unregister_hook(&cip_arp_ops);
         xt_unregister_target(&clusterip_tg_reg);
+
+        /* Wait for completion of call_rcu_bh()'s (clusterip_config_rcu_free) */
+        rcu_barrier_bh();
 }

 module_init(clusterip_tg_init);
...
@@ -742,7 +742,7 @@ static int __init nf_nat_init(void)
         spin_unlock_bh(&nf_nat_lock);

         /* Initialize fake conntrack so that NAT will skip it */
-        nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
+        nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);

         l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
...
@@ -98,7 +98,7 @@ nf_nat_fn(unsigned int hooknum,
                 return NF_ACCEPT;

         /* Don't try to NAT if this packet is not conntracked */
-        if (ct == &nf_conntrack_untracked)
+        if (nf_ct_is_untracked(ct))
                 return NF_ACCEPT;

         nat = nfct_nat(ct);
...
@@ -151,9 +151,7 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
                                                           protocol,
                                                           csum_sub(0, hsum)));
                 skb->ip_summed = CHECKSUM_NONE;
-                csum = __skb_checksum_complete_head(skb, dataoff + len);
-                if (!csum)
-                        skb->ip_summed = CHECKSUM_UNNECESSARY;
+                return __skb_checksum_complete_head(skb, dataoff + len);
         }
         return csum;
 };
...
@@ -43,7 +43,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);

 static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
 static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_RWLOCK(queue_lock);
+static DEFINE_SPINLOCK(queue_lock);
 static int peer_pid __read_mostly;
 static unsigned int copy_range __read_mostly;
 static unsigned int queue_total;
@@ -73,10 +73,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
                 break;

         case IPQ_COPY_PACKET:
-                copy_mode = mode;
+                if (range > 0xFFFF)
+                        range = 0xFFFF;
                 copy_range = range;
-                if (copy_range > 0xFFFF)
-                        copy_range = 0xFFFF;
+                copy_mode = mode;
                 break;

         default:
@@ -102,7 +102,7 @@ ipq_find_dequeue_entry(unsigned long id)
 {
         struct nf_queue_entry *entry = NULL, *i;

-        write_lock_bh(&queue_lock);
+        spin_lock_bh(&queue_lock);

         list_for_each_entry(i, &queue_list, list) {
                 if ((unsigned long)i == id) {
@@ -116,7 +116,7 @@ ipq_find_dequeue_entry(unsigned long id)
                 queue_total--;
         }

-        write_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);
         return entry;
 }
@@ -137,9 +137,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 static void
 ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
 {
-        write_lock_bh(&queue_lock);
+        spin_lock_bh(&queue_lock);
         __ipq_flush(cmpfn, data);
-        write_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);
 }

 static struct sk_buff *
@@ -153,9 +153,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
         struct nlmsghdr *nlh;
         struct timeval tv;

-        read_lock_bh(&queue_lock);
-        switch (copy_mode) {
+        switch (ACCESS_ONCE(copy_mode)) {
         case IPQ_COPY_META:
         case IPQ_COPY_NONE:
                 size = NLMSG_SPACE(sizeof(*pmsg));
@@ -163,26 +161,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
         case IPQ_COPY_PACKET:
                 if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
-                    (*errp = skb_checksum_help(entry->skb))) {
-                        read_unlock_bh(&queue_lock);
+                    (*errp = skb_checksum_help(entry->skb)))
                         return NULL;
-                }
-                if (copy_range == 0 || copy_range > entry->skb->len)
+
+                data_len = ACCESS_ONCE(copy_range);
+                if (data_len == 0 || data_len > entry->skb->len)
                         data_len = entry->skb->len;
-                else
-                        data_len = copy_range;

                 size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
                 break;

         default:
                 *errp = -EINVAL;
-                read_unlock_bh(&queue_lock);
                 return NULL;
         }

-        read_unlock_bh(&queue_lock);
-
         skb = alloc_skb(size, GFP_ATOMIC);
         if (!skb)
                 goto nlmsg_failure;
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
         if (nskb == NULL)
                 return status;

-        write_lock_bh(&queue_lock);
+        spin_lock_bh(&queue_lock);

         if (!peer_pid)
                 goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)

         __ipq_enqueue_entry(entry);

-        write_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);
         return status;

 err_out_free_nskb:
         kfree_skb(nskb);

 err_out_unlock:
-        write_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);
         return status;
 }
@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
 {
         int status;

-        write_lock_bh(&queue_lock);
+        spin_lock_bh(&queue_lock);
         status = __ipq_set_mode(mode, range);
-        write_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);
         return status;
 }
@@ -441,11 +434,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
         if (security_netlink_recv(skb, CAP_NET_ADMIN))
                 RCV_SKB_FAIL(-EPERM);

-        write_lock_bh(&queue_lock);
+        spin_lock_bh(&queue_lock);

         if (peer_pid) {
                 if (peer_pid != pid) {
-                        write_unlock_bh(&queue_lock);
+                        spin_unlock_bh(&queue_lock);
                         RCV_SKB_FAIL(-EBUSY);
                 }
         } else {
@@ -453,7 +446,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
                 peer_pid = pid;
         }

-        write_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);

         status = ipq_receive_peer(NLMSG_DATA(nlh), type,
                                   nlmsglen - NLMSG_LENGTH(0));
@@ -498,10 +491,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
         struct netlink_notify *n = ptr;

         if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
-                write_lock_bh(&queue_lock);
+                spin_lock_bh(&queue_lock);
                 if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
                         __ipq_reset();
-                write_unlock_bh(&queue_lock);
+                spin_unlock_bh(&queue_lock);
         }
         return NOTIFY_DONE;
 }
@@ -528,7 +521,7 @@ static ctl_table ipq_table[] = {
 #ifdef CONFIG_PROC_FS
 static int ip6_queue_show(struct seq_file *m, void *v)
 {
-        read_lock_bh(&queue_lock);
+        spin_lock_bh(&queue_lock);

         seq_printf(m,
                    "Peer PID          : %d\n"
@@ -546,7 +539,7 @@ static int ip6_queue_show(struct seq_file *m, void *v)
                    queue_dropped,
                    queue_user_dropped);

-        read_unlock_bh(&queue_lock);
+        spin_unlock_bh(&queue_lock);
         return 0;
 }
...
@@ -943,7 +943,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
            (other than comefrom, which userspace doesn't care
            about). */
         countersize = sizeof(struct xt_counters) * private->number;
-        counters = vmalloc_node(countersize, numa_node_id());
+        counters = vmalloc(countersize);

         if (counters == NULL)
                 return ERR_PTR(-ENOMEM);
@@ -1213,8 +1213,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
         struct ip6t_entry *iter;

         ret = 0;
-        counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
-                                numa_node_id());
+        counters = vmalloc(num_counters * sizeof(struct xt_counters));
         if (!counters) {
                 ret = -ENOMEM;
                 goto out;
@@ -1368,7 +1367,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
         if (len != size + num_counters * sizeof(struct xt_counters))
                 return -EINVAL;

-        paddc = vmalloc_node(len - size, numa_node_id());
+        paddc = vmalloc(len - size);
         if (!paddc)
                 return -ENOMEM;
...
@@ -208,7 +208,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
         type = icmp6h->icmp6_type - 130;
         if (type >= 0 && type < sizeof(noct_valid_new) &&
             noct_valid_new[type]) {
-                skb->nfct = &nf_conntrack_untracked.ct_general;
+                skb->nfct = &nf_ct_untracked_get()->ct_general;
                 skb->nfctinfo = IP_CT_NEW;
                 nf_conntrack_get(skb->nfct);
                 return NF_ACCEPT;
...
@@ -114,10 +114,8 @@ static void nf_skb_free(struct sk_buff *skb)
 }

 /* Memory Tracking Functions. */
-static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work)
+static void frag_kfree_skb(struct sk_buff *skb)
 {
-        if (work)
-                *work -= skb->truesize;
         atomic_sub(skb->truesize, &nf_init_frags.mem);
         nf_skb_free(skb);
         kfree_skb(skb);
@@ -335,7 +333,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
                         fq->q.fragments = next;

                         fq->q.meat -= free_it->len;
-                        frag_kfree_skb(free_it, NULL);
+                        frag_kfree_skb(free_it);
                 }
         }
@@ -442,7 +440,6 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
         skb_shinfo(head)->frag_list = head->next;
         skb_reset_transport_header(head);
         skb_push(head, head->data - skb_network_header(head));
-        atomic_sub(head->truesize, &nf_init_frags.mem);

         for (fp=head->next; fp; fp = fp->next) {
                 head->data_len += fp->len;
@@ -452,8 +449,8 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
                 else if (head->ip_summed == CHECKSUM_COMPLETE)
                         head->csum = csum_add(head->csum, fp->csum);
                 head->truesize += fp->truesize;
-                atomic_sub(fp->truesize, &nf_init_frags.mem);
         }
+        atomic_sub(head->truesize, &nf_init_frags.mem);

         head->next = NULL;
         head->dev = dev;
...
@@ -424,6 +424,18 @@ config NETFILTER_XT_TARGET_HL
           since you can easily create immortal packets that loop
           forever on the network.

+config NETFILTER_XT_TARGET_IDLETIMER
+        tristate "IDLETIMER target support"
+        depends on NETFILTER_ADVANCED
+        help
+
+          This option adds the `IDLETIMER' target.  Each matching packet
+          resets the timer associated with label specified when the rule is
+          added.  When the timer expires, it triggers a sysfs notification.
+          The remaining time for expiration can be read via sysfs.
+
+          To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_TARGET_LED
         tristate '"LED" target support'
         depends on LEDS_CLASS && LEDS_TRIGGERS
...
@@ -61,6 +61,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o

 # matches
 obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o
...
@@ -62,8 +62,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
 unsigned int nf_conntrack_max __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_max);

-struct nf_conn nf_conntrack_untracked __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
+DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);

 static int nf_conntrack_hash_rnd_initted;
 static unsigned int nf_conntrack_hash_rnd;
@@ -1181,10 +1181,21 @@ static void nf_ct_release_dying_list(struct net *net)
         spin_unlock_bh(&nf_conntrack_lock);
 }

+static int untrack_refs(void)
+{
+        int cnt = 0, cpu;
+
+        for_each_possible_cpu(cpu) {
+                struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
+
+                cnt += atomic_read(&ct->ct_general.use) - 1;
+        }
+        return cnt;
+}
+
 static void nf_conntrack_cleanup_init_net(void)
 {
-        /* wait until all references to nf_conntrack_untracked are dropped */
-        while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
+        while (untrack_refs() > 0)
                 schedule();

         nf_conntrack_helper_fini();
@@ -1319,10 +1330,19 @@ EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
                   &nf_conntrack_htable_size, 0600);

+void nf_ct_untracked_status_or(unsigned long bits)
+{
+        int cpu;
+
+        for_each_possible_cpu(cpu)
+                per_cpu(nf_conntrack_untracked, cpu).status |= bits;
+}
+EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
+
 static int nf_conntrack_init_init_net(void)
 {
         int max_factor = 8;
-        int ret;
+        int ret, cpu;

         /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
          * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
@@ -1361,11 +1381,13 @@ static int nf_conntrack_init_init_net(void)
                 goto err_extend;
 #endif
         /* Set up fake conntrack: to never be deleted, not in any hashes */
-        write_pnet(&nf_conntrack_untracked.ct_net, &init_net);
-        atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+        for_each_possible_cpu(cpu) {
+                struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
+
+                write_pnet(&ct->ct_net, &init_net);
+                atomic_set(&ct->ct_general.use, 1);
+        }
         /*  - and look it like as a confirmed connection */
-        set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
-
+        nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
         return 0;

 #ifdef CONFIG_NF_CONNTRACK_ZONES
...
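An assumption about the motivation for the per-cpu conversion above, inferred from the callers changed in this commit: NOTRACK, TEE and the ICMPv6 path all take a conntrack reference per packet, so one global nf_conntrack_untracked keeps a single atomic_t refcount bouncing between every CPU's cache; per-cpu copies make the increment CPU-local. The hot-path difference, sketched:

        /* before: every CPU contends on the same shared counter */
        atomic_inc(&nf_conntrack_untracked.ct_general.use);

        /* after: each CPU bumps the refcount of its own copy, via the
         * nf_ct_untracked_get() helper added to nf_conntrack.h */
        atomic_inc(&nf_ct_untracked_get()->ct_general.use);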
@@ -480,7 +480,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
         int err;

         /* ignore our fake conntrack entry */
-        if (ct == &nf_conntrack_untracked)
+        if (nf_ct_is_untracked(ct))
                 return 0;

         if (events & (1 << IPCT_DESTROY)) {
...
@@ -66,9 +66,10 @@ struct nfulnl_instance {
         u_int16_t group_num;		/* number of this queue */
         u_int16_t flags;
         u_int8_t copy_mode;
+        struct rcu_head rcu;
 };

-static DEFINE_RWLOCK(instances_lock);
+static DEFINE_SPINLOCK(instances_lock);
 static atomic_t global_seq;

 #define INSTANCE_BUCKETS	16
@@ -88,7 +89,7 @@ __instance_lookup(u_int16_t group_num)
         struct nfulnl_instance *inst;

         head = &instance_table[instance_hashfn(group_num)];
-        hlist_for_each_entry(inst, pos, head, hlist) {
+        hlist_for_each_entry_rcu(inst, pos, head, hlist) {
                 if (inst->group_num == group_num)
                         return inst;
         }
@@ -106,22 +107,26 @@ instance_lookup_get(u_int16_t group_num)
 {
         struct nfulnl_instance *inst;

-        read_lock_bh(&instances_lock);
+        rcu_read_lock_bh();
         inst = __instance_lookup(group_num);
-        if (inst)
-                instance_get(inst);
-        read_unlock_bh(&instances_lock);
+        if (inst && !atomic_inc_not_zero(&inst->use))
+                inst = NULL;
+        rcu_read_unlock_bh();

         return inst;
 }

+static void nfulnl_instance_free_rcu(struct rcu_head *head)
+{
+        kfree(container_of(head, struct nfulnl_instance, rcu));
+        module_put(THIS_MODULE);
+}
+
 static void
 instance_put(struct nfulnl_instance *inst)
 {
-        if (inst && atomic_dec_and_test(&inst->use)) {
-                kfree(inst);
-                module_put(THIS_MODULE);
-        }
+        if (inst && atomic_dec_and_test(&inst->use))
+                call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
 }

 static void nfulnl_timer(unsigned long data);
@@ -132,7 +137,7 @@ instance_create(u_int16_t group_num, int pid)
         struct nfulnl_instance *inst;
         int err;

-        write_lock_bh(&instances_lock);
+        spin_lock_bh(&instances_lock);
         if (__instance_lookup(group_num)) {
                 err = -EEXIST;
                 goto out_unlock;
@@ -166,32 +171,37 @@ instance_create(u_int16_t group_num, int pid)
         inst->copy_mode = NFULNL_COPY_PACKET;
         inst->copy_range = NFULNL_COPY_RANGE_MAX;

-        hlist_add_head(&inst->hlist,
+        hlist_add_head_rcu(&inst->hlist,
                        &instance_table[instance_hashfn(group_num)]);

-        write_unlock_bh(&instances_lock);
+        spin_unlock_bh(&instances_lock);

         return inst;

 out_unlock:
-        write_unlock_bh(&instances_lock);
+        spin_unlock_bh(&instances_lock);
         return ERR_PTR(err);
 }

 static void __nfulnl_flush(struct nfulnl_instance *inst);

+/* called with BH disabled */
 static void
 __instance_destroy(struct nfulnl_instance *inst)
 {
         /* first pull it out of the global list */
-        hlist_del(&inst->hlist);
+        hlist_del_rcu(&inst->hlist);

         /* then flush all pending packets from skb */

-        spin_lock_bh(&inst->lock);
+        spin_lock(&inst->lock);
+
+        /* lockless readers wont be able to use us */
+        inst->copy_mode = NFULNL_COPY_DISABLED;
+
         if (inst->skb)
                 __nfulnl_flush(inst);
-        spin_unlock_bh(&inst->lock);
+        spin_unlock(&inst->lock);

         /* and finally put the refcount */
         instance_put(inst);
@@ -200,9 +210,9 @@ __instance_destroy(struct nfulnl_instance *inst)
 static inline void
 instance_destroy(struct nfulnl_instance *inst)
 {
-        write_lock_bh(&instances_lock);
+        spin_lock_bh(&instances_lock);
         __instance_destroy(inst);
-        write_unlock_bh(&instances_lock);
+        spin_unlock_bh(&instances_lock);
 }

 static int
@@ -621,6 +631,7 @@ nfulnl_log_packet(u_int8_t pf,
                 size += nla_total_size(data_len);
                 break;

+        case NFULNL_COPY_DISABLED:
         default:
                 goto unlock_and_release;
         }
@@ -674,7 +685,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
                 int i;

                 /* destroy all instances for this pid */
-                write_lock_bh(&instances_lock);
+                spin_lock_bh(&instances_lock);
                 for (i = 0; i < INSTANCE_BUCKETS; i++) {
                         struct hlist_node *tmp, *t2;
                         struct nfulnl_instance *inst;
@@ -686,7 +697,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
                                         __instance_destroy(inst);
                         }
                 }
-                write_unlock_bh(&instances_lock);
+                spin_unlock_bh(&instances_lock);
         }
         return NOTIFY_DONE;
 }
@@ -863,19 +874,19 @@ static struct hlist_node *get_first(struct iter_state *st)

         for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                 if (!hlist_empty(&instance_table[st->bucket]))
-                        return instance_table[st->bucket].first;
+                        return rcu_dereference_bh(instance_table[st->bucket].first);
         }
         return NULL;
 }

 static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
 {
-        h = h->next;
+        h = rcu_dereference_bh(h->next);
         while (!h) {
                 if (++st->bucket >= INSTANCE_BUCKETS)
                         return NULL;

-                h = instance_table[st->bucket].first;
+                h = rcu_dereference_bh(instance_table[st->bucket].first);
         }
         return h;
 }
@@ -892,9 +903,9 @@ static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
 }

 static void *seq_start(struct seq_file *seq, loff_t *pos)
-        __acquires(instances_lock)
+        __acquires(rcu_bh)
 {
-        read_lock_bh(&instances_lock);
+        rcu_read_lock_bh();
         return get_idx(seq->private, *pos);
 }

@@ -905,9 +916,9 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
 }

 static void seq_stop(struct seq_file *s, void *v)
-        __releases(instances_lock)
+        __releases(rcu_bh)
 {
-        read_unlock_bh(&instances_lock);
+        rcu_read_unlock_bh();
 }

 static int seq_show(struct seq_file *s, void *v)
...
@@ -46,17 +46,19 @@ struct nfqnl_instance {
         int peer_pid;
         unsigned int queue_maxlen;
         unsigned int copy_range;
-        unsigned int queue_total;
         unsigned int queue_dropped;
         unsigned int queue_user_dropped;

-        unsigned int id_sequence;		/* 'sequence' of pkt ids */
         u_int16_t queue_num;			/* number of this queue */
         u_int8_t copy_mode;
-
-        spinlock_t lock;
-
+/*
+ * Following fields are dirtied for each queued packet,
+ * keep them in same cache line if possible.
+ */
+        spinlock_t	lock;
+        unsigned int	queue_total;
+        atomic_t	id_sequence;		/* 'sequence' of pkt ids */
         struct list_head queue_list;		/* packets in queue */
 };
@@ -238,32 +240,24 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,

         outdev = entry->outdev;

-        spin_lock_bh(&queue->lock);
-
-        switch ((enum nfqnl_config_mode)queue->copy_mode) {
+        switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
         case NFQNL_COPY_META:
         case NFQNL_COPY_NONE:
                 break;

         case NFQNL_COPY_PACKET:
                 if (entskb->ip_summed == CHECKSUM_PARTIAL &&
-                    skb_checksum_help(entskb)) {
-                        spin_unlock_bh(&queue->lock);
+                    skb_checksum_help(entskb))
                         return NULL;
-                }
-                if (queue->copy_range == 0
-                    || queue->copy_range > entskb->len)
+
+                data_len = ACCESS_ONCE(queue->copy_range);
+                if (data_len == 0 || data_len > entskb->len)
                         data_len = entskb->len;
-                else
-                        data_len = queue->copy_range;

                 size += nla_total_size(data_len);
                 break;
         }

-        entry->id = queue->id_sequence++;
-
-        spin_unlock_bh(&queue->lock);

         skb = alloc_skb(size, GFP_ATOMIC);
         if (!skb)
@@ -278,6 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
         nfmsg->version = NFNETLINK_V0;
         nfmsg->res_id = htons(queue->queue_num);

+        entry->id = atomic_inc_return(&queue->id_sequence);
         pmsg.packet_id = htonl(entry->id);
         pmsg.hw_protocol = entskb->protocol;
         pmsg.hook = entry->hook;
@@ -868,7 +863,7 @@ static int seq_show(struct seq_file *s, void *v)
                           inst->peer_pid, inst->queue_total,
                           inst->copy_mode, inst->copy_range,
                           inst->queue_dropped, inst->queue_user_dropped,
-                          inst->id_sequence, 1);
+                          atomic_read(&inst->id_sequence), 1);
 }

 static const struct seq_operations nfqnl_seq_ops = {
...
@@ -67,7 +67,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
                 return -EINVAL;

         if (info->flags & XT_CT_NOTRACK) {
-                ct = &nf_conntrack_untracked;
+                ct = nf_ct_untracked_get();
                 atomic_inc(&ct->ct_general.use);
                 goto out;
         }
@@ -132,7 +132,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
         struct nf_conn *ct = info->ct;
         struct nf_conn_help *help;

-        if (ct != &nf_conntrack_untracked) {
+        if (!nf_ct_is_untracked(ct)) {
                 help = nfct_help(ct);
                 if (help)
                         module_put(help->helper->me);
...
/*
* linux/net/netfilter/xt_IDLETIMER.c
*
* Netfilter module to trigger a timer when packet matches.
* After timer expires a kevent will be sent.
*
* Copyright (C) 2004, 2010 Nokia Corporation
* Written by Timo Teras <ext-timo.teras@nokia.com>
*
* Converted to x_tables and reworked for upstream inclusion
* by Luciano Coelho <luciano.coelho@nokia.com>
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_IDLETIMER.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>
struct idletimer_tg_attr {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj,
struct attribute *attr, char *buf);
};
struct idletimer_tg {
struct list_head entry;
struct timer_list timer;
struct work_struct work;
struct kobject *kobj;
struct idletimer_tg_attr attr;
unsigned int refcnt;
};
static LIST_HEAD(idletimer_tg_list);
static DEFINE_MUTEX(list_mutex);
static struct kobject *idletimer_tg_kobj;
static
struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
{
struct idletimer_tg *entry;
BUG_ON(!label);
list_for_each_entry(entry, &idletimer_tg_list, entry) {
if (!strcmp(label, entry->attr.attr.name))
return entry;
}
return NULL;
}
static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct idletimer_tg *timer;
unsigned long expires = 0;
mutex_lock(&list_mutex);
timer = __idletimer_tg_find_by_label(attr->name);
if (timer)
expires = timer->timer.expires;
mutex_unlock(&list_mutex);
if (time_after(expires, jiffies))
return sprintf(buf, "%u\n",
jiffies_to_msecs(expires - jiffies) / 1000);
return sprintf(buf, "0\n");
}
static void idletimer_tg_work(struct work_struct *work)
{
struct idletimer_tg *timer = container_of(work, struct idletimer_tg,
work);
sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
}
static void idletimer_tg_expired(unsigned long data)
{
struct idletimer_tg *timer = (struct idletimer_tg *) data;
pr_debug("timer %s expired\n", timer->attr.attr.name);
schedule_work(&timer->work);
}
static int idletimer_tg_create(struct idletimer_tg_info *info)
{
int ret;
info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
if (!info->timer) {
pr_debug("couldn't alloc timer\n");
ret = -ENOMEM;
goto out;
}
info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
if (!info->timer->attr.attr.name) {
pr_debug("couldn't alloc attribute name\n");
ret = -ENOMEM;
goto out_free_timer;
}
info->timer->attr.attr.mode = S_IRUGO;
info->timer->attr.show = idletimer_tg_show;
ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
if (ret < 0) {
pr_debug("couldn't add file to sysfs");
goto out_free_attr;
}
list_add(&info->timer->entry, &idletimer_tg_list);
setup_timer(&info->timer->timer, idletimer_tg_expired,
(unsigned long) info->timer);
info->timer->refcnt = 1;
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
INIT_WORK(&info->timer->work, idletimer_tg_work);
return 0;
out_free_attr:
kfree(info->timer->attr.attr.name);
out_free_timer:
kfree(info->timer);
out:
return ret;
}
/*
* The actual xt_tables plugin.
*/
static unsigned int idletimer_tg_target(struct sk_buff *skb,
const struct xt_action_param *par)
{
const struct idletimer_tg_info *info = par->targinfo;
pr_debug("resetting timer %s, timeout period %u\n",
info->label, info->timeout);
BUG_ON(!info->timer);
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
return XT_CONTINUE;
}
static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
{
struct idletimer_tg_info *info = par->targinfo;
int ret;
pr_debug("checkentry targinfo%s\n", info->label);
if (info->timeout == 0) {
pr_debug("timeout value is zero\n");
return -EINVAL;
}
if (info->label[0] == '\0' ||
strnlen(info->label,
MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
pr_debug("label is empty or not nul-terminated\n");
return -EINVAL;
}
mutex_lock(&list_mutex);
info->timer = __idletimer_tg_find_by_label(info->label);
if (info->timer) {
info->timer->refcnt++;
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
pr_debug("increased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
} else {
ret = idletimer_tg_create(info);
if (ret < 0) {
pr_debug("failed to create timer\n");
mutex_unlock(&list_mutex);
return ret;
}
}
mutex_unlock(&list_mutex);
return 0;
}

static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
{
        const struct idletimer_tg_info *info = par->targinfo;

        pr_debug("destroy targinfo %s\n", info->label);

        mutex_lock(&list_mutex);

        if (--info->timer->refcnt == 0) {
                pr_debug("deleting timer %s\n", info->label);

                list_del(&info->timer->entry);
                del_timer_sync(&info->timer->timer);
                sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
                kfree(info->timer->attr.attr.name);
                kfree(info->timer);
        } else {
                pr_debug("decreased refcnt of timer %s to %u\n",
                         info->label, info->timer->refcnt);
        }

        mutex_unlock(&list_mutex);
}

static struct xt_target idletimer_tg __read_mostly = {
        .name           = "IDLETIMER",
        .family         = NFPROTO_UNSPEC,
        .target         = idletimer_tg_target,
        .targetsize     = sizeof(struct idletimer_tg_info),
        .checkentry     = idletimer_tg_checkentry,
        .destroy        = idletimer_tg_destroy,
        .me             = THIS_MODULE,
};

static struct class *idletimer_tg_class;

static struct device *idletimer_tg_device;

static int __init idletimer_tg_init(void)
{
        int err;

        idletimer_tg_class = class_create(THIS_MODULE, "xt_idletimer");
        err = PTR_ERR(idletimer_tg_class);
        if (IS_ERR(idletimer_tg_class)) {
                pr_debug("couldn't register device class\n");
                goto out;
        }

        idletimer_tg_device = device_create(idletimer_tg_class, NULL,
                                            MKDEV(0, 0), NULL, "timers");
        err = PTR_ERR(idletimer_tg_device);
        if (IS_ERR(idletimer_tg_device)) {
                pr_debug("couldn't register system device\n");
                goto out_class;
        }

        idletimer_tg_kobj = &idletimer_tg_device->kobj;

        err = xt_register_target(&idletimer_tg);
        if (err < 0) {
                pr_debug("couldn't register xt target\n");
                goto out_dev;
        }

        return 0;

out_dev:
        device_destroy(idletimer_tg_class, MKDEV(0, 0));
out_class:
        class_destroy(idletimer_tg_class);
out:
        return err;
}

static void __exit idletimer_tg_exit(void)
{
        xt_unregister_target(&idletimer_tg);

        device_destroy(idletimer_tg_class, MKDEV(0, 0));
        class_destroy(idletimer_tg_class);
}

module_init(idletimer_tg_init);
module_exit(idletimer_tg_exit);

MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_DESCRIPTION("Xtables: idle time monitor");
MODULE_LICENSE("GPL v2");
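
The module above exposes each timer as a read-only sysfs attribute named after its label, created under the "timers" device of the "xt_idletimer" class. A minimal userspace sketch of consuming that interface follows; the label "wlan_idle" and the iptables invocation in the comment are hypothetical examples for illustration, not part of this commit.

/*
 * Hypothetical usage (label and rule are examples):
 *
 *   iptables -t mangle -A PREROUTING -i wlan0 \
 *            -j IDLETIMER --timeout 60 --label wlan_idle
 *
 * idletimer_tg_show() then reports the remaining idle time, in
 * seconds, via /sys/class/xt_idletimer/timers/<label>.
 */
#include <stdio.h>

int main(void)
{
        unsigned int secs_left;
        FILE *f = fopen("/sys/class/xt_idletimer/timers/wlan_idle", "r");

        if (!f)
                return 1;
        if (fscanf(f, "%u", &secs_left) == 1)
                printf("interface goes idle in %u seconds\n", secs_left);
        fclose(f);
        return 0;
}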
@@ -23,7 +23,7 @@ notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
            If there is a real ct entry correspondig to this packet,
            it'll hang aroun till timing out. We don't deal with it
            for performance reasons. JK */
-        skb->nfct = &nf_conntrack_untracked.ct_general;
+        skb->nfct = &nf_ct_untracked_get()->ct_general;
         skb->nfctinfo = IP_CT_NEW;
         nf_conntrack_get(skb->nfct);
...
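This hunk and the similar ones below all replace identity comparisons against the old global untracked conntrack entry. Since the untracked entry is now per-CPU, "is this packet untracked?" can no longer be answered by comparing pointers against a single object, so callers switch to the nf_ct_is_untracked() helper. A sketch of what that helper presumably boils down to, reconstructed from the IPS_UNTRACKED status bit this series introduces rather than copied from the patch:

/* Reconstructed sketch, assuming the helper tests the new status bit. */
static inline bool nf_ct_is_untracked(const struct nf_conn *ct)
{
        return test_bit(IPS_UNTRACKED_BIT, &ct->status);
}

Making the fake entry per-CPU keeps each CPU's nf_conntrack_get()/nf_conntrack_put() refcount traffic on a local cache line instead of bouncing one global line between processors.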
@@ -104,7 +104,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 #ifdef WITH_CONNTRACK
         /* Avoid counting cloned packets towards the original connection. */
         nf_conntrack_put(skb->nfct);
-        skb->nfct = &nf_conntrack_untracked.ct_general;
+        skb->nfct = &nf_ct_untracked_get()->ct_general;
         skb->nfctinfo = IP_CT_NEW;
         nf_conntrack_get(skb->nfct);
 #endif
@@ -177,7 +177,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 #ifdef WITH_CONNTRACK
         nf_conntrack_put(skb->nfct);
-        skb->nfct = &nf_conntrack_untracked.ct_general;
+        skb->nfct = &nf_ct_untracked_get()->ct_general;
         skb->nfctinfo = IP_CT_NEW;
         nf_conntrack_get(skb->nfct);
 #endif
...
@@ -120,7 +120,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
         if (ct == NULL)
                 return false;
-        if (ct == &nf_conntrack_untracked)
+        if (nf_ct_is_untracked(ct))
                 return false;
         if (ct->master)
...
@@ -123,11 +123,12 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
         ct = nf_ct_get(skb, &ctinfo);
-        if (ct == &nf_conntrack_untracked)
-                statebit = XT_CONNTRACK_STATE_UNTRACKED;
-        else if (ct != NULL)
-                statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
-        else
+        if (ct) {
+                if (nf_ct_is_untracked(ct))
+                        statebit = XT_CONNTRACK_STATE_UNTRACKED;
+                else
+                        statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
+        } else
                 statebit = XT_CONNTRACK_STATE_INVALID;
 
         if (info->match_flags & XT_CONNTRACK_STATE) {
...
@@ -3,6 +3,7 @@
 #include <linux/skbuff.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/sctp/sctp.h>
 #include <linux/sctp.h>
 #include <linux/netfilter/x_tables.h>
@@ -67,7 +68,7 @@ match_packet(const struct sk_buff *skb,
                        ++i, offset, sch->type, htons(sch->length),
                        sch->flags);
 #endif
-                offset += (ntohs(sch->length) + 3) & ~3;
+                offset += WORD_ROUND(ntohs(sch->length));
                 pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);
...
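The WORD_ROUND() change is a cleanup rather than a behaviour change: SCTP chunk lengths must be rounded up to the next 4-byte boundary when stepping to the following chunk, and net/sctp/sctp.h already provides a macro for exactly that. A small standalone check of the equivalence, assuming WORD_ROUND's usual definition:

#include <assert.h>
#include <stdio.h>

/* WORD_ROUND as defined in net/sctp/sctp.h: round up to a multiple of 4. */
#define WORD_ROUND(s) (((s) + 3) & ~3)

int main(void)
{
        unsigned int len;

        for (len = 0; len < 1024; len++) {
                /* same result as the open-coded (len + 3) & ~3 it replaces */
                assert(WORD_ROUND(len) == ((len + 3) & ~3));
                /* and it is indeed the smallest multiple of 4 >= len */
                assert(WORD_ROUND(len) % 4 == 0);
                assert(WORD_ROUND(len) >= len && WORD_ROUND(len) < len + 4);
        }
        puts("ok");
        return 0;
}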
@@ -127,7 +127,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
          * reply packet of an established SNAT-ted connection. */
         ct = nf_ct_get(skb, &ctinfo);
-        if (ct && (ct != &nf_conntrack_untracked) &&
+        if (ct && !nf_ct_is_untracked(ct) &&
             ((iph->protocol != IPPROTO_ICMP &&
               ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) ||
              (iph->protocol == IPPROTO_ICMP &&
...
@@ -26,14 +26,16 @@ state_mt(const struct sk_buff *skb, struct xt_action_param *par)
         const struct xt_state_info *sinfo = par->matchinfo;
         enum ip_conntrack_info ctinfo;
         unsigned int statebit;
+        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
-        if (nf_ct_is_untracked(skb))
-                statebit = XT_STATE_UNTRACKED;
-        else if (!nf_ct_get(skb, &ctinfo))
+        if (!ct)
                 statebit = XT_STATE_INVALID;
-        else
-                statebit = XT_STATE_BIT(ctinfo);
+        else {
+                if (nf_ct_is_untracked(ct))
+                        statebit = XT_STATE_UNTRACKED;
+                else
+                        statebit = XT_STATE_BIT(ctinfo);
+        }
         return (sinfo->statemask & statebit);
 }
...
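This hunk and the xt_conntrack one above are two instances of the same refactor: nf_ct_is_untracked() now takes the conntrack entry rather than the skb, so ct has to be fetched and NULL-checked before the untracked test can run. The resulting classification, pulled out as a standalone sketch (the XT_STATE_* names are from the patch; the helper function itself is mine):

/* Sketch only: the reordered state classification as one function. */
static unsigned int state_statebit(const struct nf_conn *ct,
                                   enum ip_conntrack_info ctinfo)
{
        if (!ct)                        /* no conntrack attached */
                return XT_STATE_INVALID;
        if (nf_ct_is_untracked(ct))     /* per-cpu fake entry: test the bit */
                return XT_STATE_UNTRACKED;
        return XT_STATE_BIT(ctinfo);    /* NEW/ESTABLISHED/RELATED/... */
}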
@@ -18,8 +18,8 @@
 #include <linux/netfilter/x_tables.h>
 
 struct xt_statistic_priv {
-        uint32_t count;
-};
+        atomic_t count;
+} ____cacheline_aligned_in_smp;
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -27,13 +27,12 @@ MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)");
 MODULE_ALIAS("ipt_statistic");
 MODULE_ALIAS("ip6t_statistic");
 
-static DEFINE_SPINLOCK(nth_lock);
-
 static bool
 statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
         const struct xt_statistic_info *info = par->matchinfo;
         bool ret = info->flags & XT_STATISTIC_INVERT;
+        int nval, oval;
 
         switch (info->mode) {
         case XT_STATISTIC_MODE_RANDOM:
@@ -41,12 +40,12 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
                         ret = !ret;
                 break;
         case XT_STATISTIC_MODE_NTH:
-                spin_lock_bh(&nth_lock);
-                if (info->master->count++ == info->u.nth.every) {
-                        info->master->count = 0;
+                do {
+                        oval = atomic_read(&info->master->count);
+                        nval = (oval == info->u.nth.every) ? 0 : oval + 1;
+                } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
+                if (nval == 0)
                         ret = !ret;
-                }
-                spin_unlock_bh(&nth_lock);
                 break;
         }
@@ -64,7 +63,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
         info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
         if (info->master == NULL)
                 return -ENOMEM;
-        info->master->count = info->u.nth.count;
+        atomic_set(&info->master->count, info->u.nth.count);
 
         return 0;
 }
...
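The xt_statistic change drops the global nth_lock in favour of a compare-and-swap retry loop on an atomic counter, so concurrent CPUs no longer serialize on a spinlock just to bump a per-rule count. A userspace model of the same technique using C11 atomics; function and variable names here are mine, and "every" plays the role of info->u.nth.every, so a value of every = n - 1 gives one match per n packets:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint count;

/* Lock-free every-Nth selection: losers of the compare-and-swap race
 * simply reread the counter and retry, mirroring the kernel's
 * atomic_cmpxchg() loop above. Returns true once every (every + 1) calls. */
static bool every_nth(unsigned int every)
{
        unsigned int oval, nval;

        do {
                oval = atomic_load(&count);
                nval = (oval == every) ? 0 : oval + 1;
        } while (!atomic_compare_exchange_weak(&count, &oval, nval));

        return nval == 0;
}

int main(void)
{
        for (int i = 0; i < 10; i++)
                printf("packet %d: %s\n", i, every_nth(2) ? "match" : "skip");
        return 0;
}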