Commit 51b03285 authored by Harald Welte, committed by David S. Miller

[NETFILTER]: Add conntrack runtime statistics

This patch adds runtime statistics to the connection tracking core, pretty
similar to what 'rtstat' does for the routing cache.

This was the last patch in this incremental set.  The only thing I still
have pending at this time is the TCP window tracking code.
Signed-off-by: Martin Josefsson <gandalf@wlug.westbo.se>
Signed-off-by: Harald Welte <laforge@netfilter.org>
Signed-off-by: David S. Miller <davem@redhat.com>
parent 08d121b7
@@ -284,6 +284,26 @@ static inline int is_confirmed(struct ip_conntrack *ct)
 }
 
 extern unsigned int ip_conntrack_htable_size;
 
+struct ip_conntrack_stat
+{
+	unsigned int searched;
+	unsigned int found;
+	unsigned int new;
+	unsigned int invalid;
+	unsigned int ignore;
+	unsigned int delete;
+	unsigned int delete_list;
+	unsigned int insert;
+	unsigned int insert_failed;
+	unsigned int drop;
+	unsigned int early_drop;
+	unsigned int icmp_error;
+	unsigned int expect_new;
+	unsigned int expect_create;
+	unsigned int expect_delete;
+};
+
 /* eg. PROVIDES_CONNTRACK(ftp); */
 #define PROVIDES_CONNTRACK(name) \
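
The fifteen counters above are instantiated once per possible CPU (via the DEFINE_PER_CPU in ip_conntrack_core.c below), so the hot paths can bump them without taking a lock: each CPU only ever writes its own copy, and readers tolerate slightly stale values. A minimal sketch of the increment pattern, assuming a hypothetical caller (the struct and variable names are the ones this patch introduces):

#include <linux/percpu.h>

/* One stats block per CPU, as defined in ip_conntrack_core.c. */
DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);

/* Hypothetical hot-path caller, for illustration only: bump this
 * CPU's copy of a counter.  Lock-free, because no other CPU ever
 * writes this copy. */
static inline void note_searched(void)
{
	__get_cpu_var(ip_conntrack_stat).searched++;
}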
@@ -35,6 +35,7 @@
 #include <linux/random.h>
 #include <linux/jhash.h>
 #include <linux/err.h>
+#include <linux/percpu.h>
 #include <linux/moduleparam.h>
 
 /* This rwlock protects the main hash table, protocol/helper/expected
@@ -58,6 +59,7 @@
 DECLARE_RWLOCK(ip_conntrack_lock);
 DECLARE_RWLOCK(ip_conntrack_expect_tuple_lock);
 
+static atomic_t ip_conntrack_count = ATOMIC_INIT(0);
 void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack) = NULL;
 LIST_HEAD(ip_conntrack_expect_list);
@@ -65,12 +67,13 @@ LIST_HEAD(protocol_list);
 static LIST_HEAD(helpers);
 unsigned int ip_conntrack_htable_size = 0;
 int ip_conntrack_max;
-static atomic_t ip_conntrack_count = ATOMIC_INIT(0);
 struct list_head *ip_conntrack_hash;
 static kmem_cache_t *ip_conntrack_cachep;
 static kmem_cache_t *ip_conntrack_expect_cachep;
 struct ip_conntrack ip_conntrack_untracked;
+DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
+
 extern struct ip_conntrack_protocol ip_conntrack_generic_protocol;
 
 static inline int proto_cmpfn(const struct ip_conntrack_protocol *curr,
@@ -179,6 +182,7 @@ destroy_expect(struct ip_conntrack_expect *exp)
 	IP_NF_ASSERT(!timer_pending(&exp->timeout));
 
 	kmem_cache_free(ip_conntrack_expect_cachep, exp);
+	__get_cpu_var(ip_conntrack_stat).expect_delete++;
 }
 
 inline void ip_conntrack_expect_put(struct ip_conntrack_expect *exp)
@@ -347,12 +351,15 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
 	kmem_cache_free(ip_conntrack_cachep, ct);
 	atomic_dec(&ip_conntrack_count);
+	__get_cpu_var(ip_conntrack_stat).delete++;
 }
 
 static void death_by_timeout(unsigned long ul_conntrack)
 {
 	struct ip_conntrack *ct = (void *)ul_conntrack;
 
+	__get_cpu_var(ip_conntrack_stat).delete_list++;
+
 	WRITE_LOCK(&ip_conntrack_lock);
 	clean_from_lists(ct);
 	WRITE_UNLOCK(&ip_conntrack_lock);
@@ -375,13 +382,19 @@ __ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
 {
 	struct ip_conntrack_tuple_hash *h;
 	unsigned int hash = hash_conntrack(tuple);
+	/* use per_cpu() to avoid multiple calls to smp_processor_id() */
+	unsigned int cpu = smp_processor_id();
 
 	MUST_BE_READ_LOCKED(&ip_conntrack_lock);
-	h = LIST_FIND(&ip_conntrack_hash[hash],
-		      conntrack_tuple_cmp,
-		      struct ip_conntrack_tuple_hash *,
-		      tuple, ignored_conntrack);
-	return h;
+	list_for_each_entry(h, &ip_conntrack_hash[hash], list) {
+		if (conntrack_tuple_cmp(h, tuple, ignored_conntrack)) {
+			per_cpu(ip_conntrack_stat, cpu).found++;
+			return h;
+		}
+		per_cpu(ip_conntrack_stat, cpu).searched++;
+	}
+	return NULL;
 }
 
 /* Find a connection corresponding to a tuple. */
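
Counter semantics in the rewritten lookup: found is bumped once per successful lookup, while searched is bumped once for every non-matching entry walked, so searched/found approximates the average hash-chain depth at lookup time. The kernel never folds the per-CPU copies together; a consumer that wants a single total has to sum them itself, along the lines of this hypothetical helper (not part of the patch):

/* Hypothetical helper: sum one per-CPU counter into a global total,
 * the same way a reader of the /proc file (added below) would. */
static unsigned int total_searched(void)
{
	unsigned int cpu, sum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_possible(cpu))
			continue;
		sum += per_cpu(ip_conntrack_stat, cpu).searched;
	}
	return sum;
}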
@@ -475,10 +488,12 @@ __ip_conntrack_confirm(struct nf_ct_info *nfct)
 		atomic_inc(&ct->ct_general.use);
 		set_bit(IPS_CONFIRMED_BIT, &ct->status);
 		WRITE_UNLOCK(&ip_conntrack_lock);
+		__get_cpu_var(ip_conntrack_stat).insert++;
 		return NF_ACCEPT;
 	}
 
 	WRITE_UNLOCK(&ip_conntrack_lock);
+	__get_cpu_var(ip_conntrack_stat).insert_failed++;
 	return NF_DROP;
 }
@@ -522,6 +537,7 @@ static int early_drop(struct list_head *chain)
 	if (del_timer(&h->ctrack->timeout)) {
 		death_by_timeout((unsigned long)h->ctrack);
 		dropped = 1;
+		__get_cpu_var(ip_conntrack_stat).early_drop++;
 	}
 	ip_conntrack_put(h->ctrack);
 	return dropped;
@@ -650,11 +666,16 @@ init_conntrack(const struct ip_conntrack_tuple *tuple,
 		if (expected->expectfn)
 			expected->expectfn(conntrack);
+		__get_cpu_var(ip_conntrack_stat).expect_new++;
+
 		goto ret;
-	} else
+	} else {
 		conntrack->helper = ip_ct_find_helper(&repl_tuple);
+		__get_cpu_var(ip_conntrack_stat).new++;
+	}
 
 end:	atomic_inc(&ip_conntrack_count);
 	WRITE_UNLOCK(&ip_conntrack_lock);
@@ -755,8 +776,10 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
 #endif
 
 	/* Previously seen (loopback or untracked)? Ignore. */
-	if ((*pskb)->nfct)
+	if ((*pskb)->nfct) {
+		__get_cpu_var(ip_conntrack_stat).ignore++;
 		return NF_ACCEPT;
+	}
 
 	proto = ip_ct_find_proto((*pskb)->nh.iph->protocol);
@@ -764,16 +787,22 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
 	 * inverse of the return code tells to the netfilter
 	 * core what to do with the packet. */
 	if (proto->error != NULL
-	    && (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0)
+	    && (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0) {
+		__get_cpu_var(ip_conntrack_stat).icmp_error++;
 		return -ret;
+	}
 
-	if (!(ct = resolve_normal_ct(*pskb, proto,&set_reply,hooknum,&ctinfo)))
+	if (!(ct = resolve_normal_ct(*pskb, proto,&set_reply,hooknum,&ctinfo))) {
 		/* Not valid part of a connection */
+		__get_cpu_var(ip_conntrack_stat).invalid++;
 		return NF_ACCEPT;
+	}
 
-	if (IS_ERR(ct))
+	if (IS_ERR(ct)) {
 		/* Too stressed to deal. */
+		__get_cpu_var(ip_conntrack_stat).drop++;
 		return NF_DROP;
+	}
 
 	IP_NF_ASSERT((*pskb)->nfct);
@@ -782,6 +811,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
 		/* Invalid */
 		nf_conntrack_put((*pskb)->nfct);
 		(*pskb)->nfct = NULL;
+		__get_cpu_var(ip_conntrack_stat).invalid++;
 		return NF_ACCEPT;
 	}
@@ -789,6 +819,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum,
 		ret = ct->helper->help(*pskb, ct, ctinfo);
 		if (ret == -1) {
 			/* Invalid */
+			__get_cpu_var(ip_conntrack_stat).invalid++;
 			nf_conntrack_put((*pskb)->nfct);
 			(*pskb)->nfct = NULL;
 			return NF_ACCEPT;
@@ -992,6 +1023,8 @@ out:	ip_conntrack_expect_insert(expect, related_to);
 	WRITE_UNLOCK(&ip_conntrack_lock);
 
+	__get_cpu_var(ip_conntrack_stat).expect_create++;
+
 	return ret;
 }
@@ -21,6 +21,7 @@
 #include <linux/skbuff.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/percpu.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
@@ -44,6 +45,9 @@
 MODULE_LICENSE("GPL");
 
 extern atomic_t ip_conntrack_count;
+DECLARE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
+
 static int kill_proto(const struct ip_conntrack *i, void *data)
 {
 	return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum ==
@@ -283,6 +287,86 @@ static struct file_operations exp_file_ops = {
 	.release = seq_release
 };
 
+static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	int cpu;
+
+	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+		if (!cpu_possible(cpu))
+			continue;
+		*pos = cpu;
+		return &per_cpu(ip_conntrack_stat, cpu);
+	}
+
+	return NULL;
+}
+
+static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	int cpu;
+
+	for (cpu = *pos + 1; cpu < NR_CPUS; ++cpu) {
+		if (!cpu_possible(cpu))
+			continue;
+		*pos = cpu;
+		return &per_cpu(ip_conntrack_stat, cpu);
+	}
+
+	return NULL;
+}
+
+static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int ct_cpu_seq_show(struct seq_file *seq, void *v)
+{
+	unsigned int nr_conntracks = atomic_read(&ip_conntrack_count);
+	struct ip_conntrack_stat *st = v;
+
+	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
+			"%08x %08x %08x %08x %08x %08x %08x %08x \n",
+		   nr_conntracks,
+		   st->searched,
+		   st->found,
+		   st->new,
+		   st->invalid,
+		   st->ignore,
+		   st->delete,
+		   st->delete_list,
+		   st->insert,
+		   st->insert_failed,
+		   st->drop,
+		   st->early_drop,
+		   st->icmp_error,
+		   st->expect_new,
+		   st->expect_create,
+		   st->expect_delete
+		);
+	return 0;
+}
+
+static struct seq_operations ct_cpu_seq_ops = {
+	.start	= ct_cpu_seq_start,
+	.next	= ct_cpu_seq_next,
+	.stop	= ct_cpu_seq_stop,
+	.show	= ct_cpu_seq_show,
+};
+
+static int ct_cpu_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &ct_cpu_seq_ops);
+}
+
+static struct file_operations ct_cpu_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = ct_cpu_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release_private,
+};
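
The seq_file handlers above make /proc/net/ip_conntrack_stat print one line per possible CPU: sixteen zero-padded hex fields in the seq_printf order, i.e. entries (the global ip_conntrack_count, repeated on every line), searched, found, new, invalid, ignore, delete, delete_list, insert, insert_failed, drop, early_drop, icmp_error, expect_new, expect_create, expect_delete. Hypothetical output on a two-CPU machine (values invented for illustration):

0000002a 000001a4 00000131 0000007e 00000003 00000011 00000065 00000060 00000078 00000000 00000000 00000000 00000002 00000004 00000004 00000004 
0000002a 0000019b 0000012c 00000081 00000005 0000000e 00000061 0000005d 0000007b 00000000 00000000 00000000 00000001 00000003 00000003 00000003 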
 
 static unsigned int ip_confirm(unsigned int hooknum,
 			       struct sk_buff **pskb,
 			       const struct net_device *in,
@@ -608,7 +692,7 @@ static ctl_table ip_ct_net_table[] = {
 #endif
 static int init_or_cleanup(int init)
 {
-	struct proc_dir_entry *proc, *proc_exp;
+	struct proc_dir_entry *proc, *proc_exp, *proc_stat;
 	int ret = 0;
 
 	if (!init) goto cleanup;
@@ -625,10 +709,16 @@ static int init_or_cleanup(int init)
 	if (!proc_exp) goto cleanup_proc;
 	proc_exp->proc_fops = &exp_file_ops;
 
+	proc_stat = proc_net_fops_create("ip_conntrack_stat", S_IRUGO,
+					 &ct_cpu_seq_fops);
+	if (!proc_stat)
+		goto cleanup_proc_exp;
+
+	proc_stat->owner = THIS_MODULE;
+
 	ret = nf_register_hook(&ip_conntrack_defrag_ops);
 	if (ret < 0) {
 		printk("ip_conntrack: can't register pre-routing defrag hook.\n");
-		goto cleanup_proc_exp;
+		goto cleanup_proc_stat;
 	}
 	ret = nf_register_hook(&ip_conntrack_defrag_local_out_ops);
 	if (ret < 0) {
@@ -680,6 +770,8 @@ static int init_or_cleanup(int init)
 	nf_unregister_hook(&ip_conntrack_defrag_local_out_ops);
 cleanup_defragops:
 	nf_unregister_hook(&ip_conntrack_defrag_ops);
+cleanup_proc_stat:
+	proc_net_remove("ip_conntrack_stat");
 cleanup_proc_exp:
 	proc_net_remove("ip_conntrack_exp");
 cleanup_proc:
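
Since the format is fixed-width hex, an rtstat-style watcher is easy to build in userspace. A minimal hypothetical reader, not part of this patch or the kernel tree:

/* Hypothetical userspace sketch: print per-CPU "searched" and "found"
 * (fields 2 and 3 of each line) from /proc/net/ip_conntrack_stat. */
#include <stdio.h>

int main(void)
{
	unsigned int f[16];
	int cpu = 0;
	FILE *fp = fopen("/proc/net/ip_conntrack_stat", "r");

	if (!fp)
		return 1;
	while (fscanf(fp, "%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x",
		      &f[0], &f[1], &f[2], &f[3], &f[4], &f[5], &f[6], &f[7],
		      &f[8], &f[9], &f[10], &f[11], &f[12], &f[13], &f[14],
		      &f[15]) == 16)
		printf("cpu%d: searched=%u found=%u\n", cpu++, f[1], f[2]);
	fclose(fp);
	return 0;
}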