Commit cc41c84b authored by Florian Westphal, committed by Pablo Neira Ayuso

netfilter: kill the fake untracked conntrack objects

Resurrect an old patch from Pablo Neira to remove the untracked objects.

Currently, there are four possible states of an skb wrt. conntrack.

1. No conntrack attached, ct is NULL.
2. Normal (kmem cache allocated) ct attached.
3. A template (kmalloc'd), never placed in any hash table.
4. The 'untracked' conntrack, a percpu nf_conn object, tagged via
   IPS_UNTRACKED_BIT in ct->status.

Untracked is supposed to be identical to case 1.  It exists only
so users can check

-m conntrack --ctstate UNTRACKED vs.
-m conntrack --ctstate INVALID

e.g. attempts to set a connmark on INVALID or UNTRACKED conntracks are
supposed to be no-ops.
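
For instance (an illustrative rule, not taken from this patch), a rule like

 iptables -t mangle -A PREROUTING -m conntrack --ctstate INVALID,UNTRACKED -j CONNMARK --set-mark 1

must not alter any conntrack entry, because there is no real entry to alter.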

Thus currently we need to check
 ct == NULL || nf_ct_is_untracked(ct)

in many places to avoid altering untracked objects.

The other consequence of the percpu untracked object is that every
-j NOTRACK (and, later, the kfree_skb of such an skb) results in an atomic
op (an inc/dec of the untracked conntrack's refcount).
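
In code terms, what goes away is roughly this per-packet pattern (it is
removed verbatim in the xt_CT.c and nft_ct.c hunks below):

 ct = nf_ct_untracked_get();
 atomic_inc(&ct->ct_general.use);   /* atomic op on every NOTRACKed packet */
 nf_ct_set(skb, ct, IP_CT_NEW);

with the matching atomic decrement happening when the skb is freed.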

This adds a new kernel-private ctinfo state, IP_CT_UNTRACKED, to
make the distinction instead.

The (few) places that care about packet invalid (ct is NULL) vs.
packet untracked now need to test ct == NULL vs. ctinfo == IP_CT_UNTRACKED,
but all other places can omit the nf_ct_is_untracked() check.
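
The resulting caller pattern, as seen in the xt_state.c hunk below:

 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

 if (ct)                                 /* normal, tracked packet */
     statebit = XT_STATE_BIT(ctinfo);
 else if (ctinfo == IP_CT_UNTRACKED)     /* packet was NOTRACKed */
     statebit = XT_STATE_UNTRACKED;
 else                                    /* no conntrack state at all */
     statebit = XT_STATE_INVALID;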
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 6e354a5e
@@ -1556,12 +1556,8 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
 	if (!ct || !nf_ct_is_untracked(ct)) {
-		struct nf_conn *untracked;
-
 		nf_conntrack_put(&ct->ct_general);
-		untracked = nf_ct_untracked_get();
-		nf_conntrack_get(&untracked->ct_general);
-		nf_ct_set(skb, untracked, IP_CT_NEW);
+		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 	}
 #endif
 }
...
@@ -243,14 +243,6 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
 					   enum ip_conntrack_dir dir,
 					   u32 seq);
 
-/* Fake conntrack entry for untracked connections */
-DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
-static inline struct nf_conn *nf_ct_untracked_get(void)
-{
-	return raw_cpu_ptr(&nf_conntrack_untracked);
-}
-
-void nf_ct_untracked_status_or(unsigned long bits);
 /* Iterate over all conntracks: if iter returns true, it's deleted. */
 void nf_ct_iterate_cleanup(struct net *net,
 			   int (*iter)(struct nf_conn *i, void *data),
@@ -283,7 +275,7 @@ static inline int nf_ct_is_dying(const struct nf_conn *ct)
 
 static inline int nf_ct_is_untracked(const struct nf_conn *ct)
 {
-	return test_bit(IPS_UNTRACKED_BIT, &ct->status);
+	return false;
 }
 
 /* Packet is received from loopback */
...
@@ -28,12 +28,14 @@ enum ip_conntrack_info {
 	/* only for userspace compatibility */
 #ifndef __KERNEL__
 	IP_CT_NEW_REPLY = IP_CT_NUMBER,
+#else
+	IP_CT_UNTRACKED = 7,
 #endif
 };
 
 #define NF_CT_STATE_INVALID_BIT		(1 << 0)
 #define NF_CT_STATE_BIT(ctinfo)		(1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
-#define NF_CT_STATE_UNTRACKED_BIT	(1 << (IP_CT_NUMBER + 1))
+#define NF_CT_STATE_UNTRACKED_BIT	(1 << (IP_CT_UNTRACKED + 1))
 
 /* Bitset representing status of connection. */
 enum ip_conntrack_status {
@@ -94,7 +96,7 @@ enum ip_conntrack_status {
 	IPS_TEMPLATE_BIT = 11,
 	IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT),
 
-	/* Conntrack is a fake untracked entry */
+	/* Conntrack is a fake untracked entry. Obsolete and not used anymore */
 	IPS_UNTRACKED_BIT = 12,
 	IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
...
@@ -69,8 +69,7 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	/* Avoid counting cloned packets towards the original connection. */
 	nf_reset(skb);
-	nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
-	nf_conntrack_get(skb_nfct(skb));
+	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 #endif
 	/*
 	 * If we are in PREROUTING/INPUT, decrease the TTL to mitigate potential
...
@@ -221,8 +221,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
 	type = icmp6h->icmp6_type - 130;
 	if (type >= 0 && type < sizeof(noct_valid_new) &&
 	    noct_valid_new[type]) {
-		nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
-		nf_conntrack_get(skb_nfct(skb));
+		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 		return NF_ACCEPT;
 	}
...
@@ -58,8 +58,7 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 	nf_reset(skb);
-	nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
-	nf_conntrack_get(skb_nfct(skb));
+	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 #endif
 	if (hooknum == NF_INET_PRE_ROUTING ||
 	    hooknum == NF_INET_LOCAL_IN) {
...
@@ -180,14 +180,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
 unsigned int nf_conntrack_max __read_mostly;
 seqcount_t nf_conntrack_generation __read_mostly;
 
-/* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used
- * for the nfctinfo. We cheat by (ab)using the PER CPU cache line
- * alignment to enforce this.
- */
-DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
-EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
-
 static unsigned int nf_conntrack_hash_rnd __read_mostly;
 
 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
@@ -1314,9 +1306,10 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
 	int ret;
 
 	tmpl = nf_ct_get(skb, &ctinfo);
-	if (tmpl) {
+	if (tmpl || ctinfo == IP_CT_UNTRACKED) {
 		/* Previously seen (loopback or untracked)? Ignore. */
-		if (!nf_ct_is_template(tmpl)) {
+		if ((tmpl && !nf_ct_is_template(tmpl)) ||
+		     ctinfo == IP_CT_UNTRACKED) {
 			NF_CT_STAT_INC_ATOMIC(net, ignore);
 			return NF_ACCEPT;
 		}
@@ -1629,18 +1622,6 @@ void nf_ct_free_hashtable(void *hash, unsigned int size)
 }
 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
 
-static int untrack_refs(void)
-{
-	int cnt = 0, cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
-
-		cnt += atomic_read(&ct->ct_general.use) - 1;
-	}
-	return cnt;
-}
-
 void nf_conntrack_cleanup_start(void)
 {
 	conntrack_gc_work.exiting = true;
@@ -1650,8 +1631,6 @@ void nf_conntrack_cleanup_start(void)
 void nf_conntrack_cleanup_end(void)
 {
 	RCU_INIT_POINTER(nf_ct_destroy, NULL);
-	while (untrack_refs() > 0)
-		schedule();
 
 	cancel_delayed_work_sync(&conntrack_gc_work.dwork);
 	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
@@ -1825,20 +1804,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
 		  &nf_conntrack_htable_size, 0600);
 
-void nf_ct_untracked_status_or(unsigned long bits)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		per_cpu(nf_conntrack_untracked, cpu).status |= bits;
-}
-EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
-
 int nf_conntrack_init_start(void)
 {
 	int max_factor = 8;
 	int ret = -ENOMEM;
-	int i, cpu;
+	int i;
 
 	seqcount_init(&nf_conntrack_generation);
@@ -1921,15 +1891,6 @@ int nf_conntrack_init_start(void)
 	if (ret < 0)
 		goto err_proto;
 
-	/* Set up fake conntrack: to never be deleted, not in any hashes */
-	for_each_possible_cpu(cpu) {
-		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
-		write_pnet(&ct->ct_net, &init_net);
-		atomic_set(&ct->ct_general.use, 1);
-	}
-	/* - and look it like as a confirmed connection */
-	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
-
 	conntrack_gc_work_init(&conntrack_gc_work);
 	queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
@@ -1977,6 +1938,7 @@ int nf_conntrack_init_net(struct net *net)
 	int ret = -ENOMEM;
 	int cpu;
 
+	BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
 	atomic_set(&net->ct.count, 0);
 	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
...
@@ -861,9 +861,6 @@ static int __init nf_nat_init(void)
 
 	nf_ct_helper_expectfn_register(&follow_master_nat);
 
-	/* Initialize fake conntrack so that NAT will skip it */
-	nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
-
 	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
 	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
 			 nfnetlink_parse_nat_setup);
...
@@ -72,12 +72,12 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
 
 	switch (priv->key) {
 	case NFT_CT_STATE:
-		if (ct == NULL)
-			state = NF_CT_STATE_INVALID_BIT;
-		else if (nf_ct_is_untracked(ct))
+		if (ct)
+			state = NF_CT_STATE_BIT(ctinfo);
+		else if (ctinfo == IP_CT_UNTRACKED)
 			state = NF_CT_STATE_UNTRACKED_BIT;
 		else
-			state = NF_CT_STATE_BIT(ctinfo);
+			state = NF_CT_STATE_INVALID_BIT;
 		*dest = state;
 		return;
 	default:
@@ -718,12 +718,10 @@ static void nft_notrack_eval(const struct nft_expr *expr,
 
 	ct = nf_ct_get(pkt->skb, &ctinfo);
 	/* Previously seen (loopback or untracked)? Ignore. */
-	if (ct)
+	if (ct || ctinfo == IP_CT_UNTRACKED)
 		return;
 
-	ct = nf_ct_untracked_get();
-	atomic_inc(&ct->ct_general.use);
-	nf_ct_set(skb, ct, IP_CT_NEW);
+	nf_ct_set(skb, ct, IP_CT_UNTRACKED);
 }
 
 static struct nft_expr_type nft_notrack_type;
...
@@ -26,11 +26,12 @@ static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct)
 	if (skb->_nfct != 0)
 		return XT_CONTINUE;
 
-	/* special case the untracked ct : we want the percpu object */
-	if (!ct)
-		ct = nf_ct_untracked_get();
-	atomic_inc(&ct->ct_general.use);
-	nf_ct_set(skb, ct, IP_CT_NEW);
+	if (ct) {
+		atomic_inc(&ct->ct_general.use);
+		nf_ct_set(skb, ct, IP_CT_NEW);
+	} else {
+		nf_ct_set(skb, ct, IP_CT_UNTRACKED);
+	}
 
 	return XT_CONTINUE;
 }
@@ -335,7 +336,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par,
 	struct nf_conn *ct = info->ct;
 	struct nf_conn_help *help;
 
-	if (ct && !nf_ct_is_untracked(ct)) {
+	if (ct) {
 		help = nfct_help(ct);
 		if (help)
 			module_put(help->helper->me);
@@ -412,8 +413,7 @@ notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	if (skb->_nfct != 0)
 		return XT_CONTINUE;
 
-	nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
-	nf_conntrack_get(skb_nfct(skb));
+	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 
 	return XT_CONTINUE;
 }
...
@@ -172,12 +172,11 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
 
 	ct = nf_ct_get(skb, &ctinfo);
 
-	if (ct) {
-		if (nf_ct_is_untracked(ct))
-			statebit = XT_CONNTRACK_STATE_UNTRACKED;
-		else
-			statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
-	} else
+	if (ct)
+		statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
+	else if (ctinfo == IP_CT_UNTRACKED)
+		statebit = XT_CONNTRACK_STATE_UNTRACKED;
+	else
 		statebit = XT_CONNTRACK_STATE_INVALID;
 
 	if (info->match_flags & XT_CONNTRACK_STATE) {
...
@@ -28,14 +28,13 @@ state_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	unsigned int statebit;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
-	if (!ct)
+	if (ct)
+		statebit = XT_STATE_BIT(ctinfo);
+	else if (ctinfo == IP_CT_UNTRACKED)
+		statebit = XT_STATE_UNTRACKED;
+	else
 		statebit = XT_STATE_INVALID;
-	else {
-		if (nf_ct_is_untracked(ct))
-			statebit = XT_STATE_UNTRACKED;
-		else
-			statebit = XT_STATE_BIT(ctinfo);
-	}
 
 	return (sinfo->statemask & statebit);
 }
...