Commit 0320d1e7 authored by David S. Miller

Merge branch 'netfilter-flowtable-hardware-offload'

Pablo Neira Ayuso says:

====================
netfilter flowtable hardware offload

The following patchset adds hardware offload support for the flowtable
infrastructure [1]. This infrastructure provides a fast datapath for
the classic Linux forwarding path that users can enable through policy,
eg.

 table inet x {
      flowtable f {
               hook ingress priority 10 devices = { eth0, eth1 }
	       flags offload
      }
      chain y {
               type filter hook forward priority 0; policy accept;
               ip protocol tcp flow offload @f
      }
 }

The example above enables the fastpath for TCP traffic between devices
eth0 and eth1. Users can turn on hardware offload through the
'offload' flag in the flowtable definition. If this new flag is not
specified, the software flowtable datapath is used.

This patchset is composed of 4 preparation patches:

   room to extend this infrastructure, eg. accelerate bridge forwarding.

And 2 patches to add the hardware offload control and data planes:

   hardware offload. This includes a new NFTA_FLOWTABLE_FLAGS netlink
   attribute to convey the optional NF_FLOWTABLE_HW_OFFLOAD flag.
   API available at net/core/flow_offload.h to represent the flow
   through two flow_rule objects to configure an exact 5-tuple matching
   in each direction plus the corresponding forwarding actions, that is,
   the MAC address, NAT and checksum updates, and port redirection, in
   order to configure the hardware datapath. This patch only supports
   IPv4 and statistics collection for flow aging as an initial step.
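
As a rough illustration (not part of this patchset), the sketch below walks
one of these per-direction flow_rule objects with the existing
net/core/flow_offload.h helpers; foo_parse_flow_rule() is a made-up driver
function, and only the keys and actions mentioned above are shown.

 /* Hedged sketch; relevant header: <net/flow_offload.h>. */
 static void foo_parse_flow_rule(struct flow_rule *rule)
 {
     struct flow_match_ipv4_addrs addrs;
     struct flow_match_ports ports;
     struct flow_action_entry *entry;
     int i;

     flow_rule_match_ipv4_addrs(rule, &addrs);   /* IPv4 saddr/daddr */
     flow_rule_match_ports(rule, &ports);        /* source/destination ports */

     flow_action_for_each(i, entry, &rule->action) {
         switch (entry->id) {
         case FLOW_ACTION_MANGLE:        /* MAC address, NAT and checksum updates */
         case FLOW_ACTION_REDIRECT:      /* forward through the output port */
         default:
             break;
         }
     }
 }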

This patchset introduces a new flow_block callback type that needs to be
set up to configure the flowtable hardware offload.
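
For concreteness, here is a hedged driver-side sketch of that setup, assuming
a hypothetical foo driver: the flowtable core binds the block through
ndo_setup_tc() with the new TC_SETUP_FT type, and the registered callback then
receives flow_cls_offload commands for each flow. flow_block_cb_alloc(),
flow_block_cb_add(), flow_cls_offload and TC_SETUP_FT are existing APIs;
everything prefixed foo_ is made up, and unbind handling is omitted.

 /* Hedged sketch; relevant headers: <linux/netdevice.h>, <net/flow_offload.h>,
  * <net/pkt_cls.h>. */
 static int foo_ft_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
 {
     struct flow_cls_offload *f = type_data;

     if (type != TC_SETUP_FT)
         return -EOPNOTSUPP;

     switch (f->command) {
     case FLOW_CLS_REPLACE:      /* program the 5-tuple match and actions into hardware */
     case FLOW_CLS_DESTROY:      /* remove the hardware entry */
     case FLOW_CLS_STATS:        /* fill f->stats, used for flow aging */
         return 0;
     default:
         return -EOPNOTSUPP;
     }
 }

 static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
 {
     struct flow_block_offload *bo = type_data;
     struct flow_block_cb *block_cb;

     if (type != TC_SETUP_FT || bo->command != FLOW_BLOCK_BIND)
         return -EOPNOTSUPP;

     block_cb = flow_block_cb_alloc(foo_ft_block_cb, dev, dev, NULL);
     if (IS_ERR(block_cb))
         return PTR_ERR(block_cb);

     flow_block_cb_add(block_cb, bo);
     return 0;
 }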

The first client of this infrastructure follows after this batch.
I would like to thank Mellanox for developing the first upstream driver
to use this infrastructure.

[1] Documentation/networking/nf_flowtable.txt
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4717b053 c29f74e0
@@ -848,6 +848,7 @@ enum tc_setup_type {
TC_SETUP_ROOT_QDISC,
TC_SETUP_QDISC_GRED,
TC_SETUP_QDISC_TAPRIO,
TC_SETUP_FT,
};
/* These structures hold the attributes of bpf state that are being passed
...
@@ -8,25 +8,43 @@
#include <linux/rcupdate.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/flow_offload.h>
#include <net/dst.h>
struct nf_flowtable;
struct nf_flow_rule;
struct flow_offload;
enum flow_offload_tuple_dir;
struct nf_flowtable_type {
struct list_head list;
int family;
int (*init)(struct nf_flowtable *ft);
int (*setup)(struct nf_flowtable *ft,
struct net_device *dev,
enum flow_block_command cmd);
int (*action)(struct net *net,
const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
void (*free)(struct nf_flowtable *ft);
nf_hookfn *hook;
struct module *owner;
};
enum nf_flowtable_flags {
NF_FLOWTABLE_HW_OFFLOAD = 0x1,
};
struct nf_flowtable {
struct list_head list;
struct rhashtable rhashtable;
int priority;
const struct nf_flowtable_type *type;
struct delayed_work gc_work;
unsigned int flags;
struct flow_block flow_block;
possible_net_t net;
};
enum flow_offload_tuple_dir {
@@ -69,14 +87,22 @@ struct flow_offload_tuple_rhash {
#define FLOW_OFFLOAD_DNAT 0x2
#define FLOW_OFFLOAD_DYING 0x4
#define FLOW_OFFLOAD_TEARDOWN 0x8
#define FLOW_OFFLOAD_HW 0x10
#define FLOW_OFFLOAD_HW_DYING 0x20
#define FLOW_OFFLOAD_HW_DEAD 0x40
enum flow_offload_type {
NF_FLOW_OFFLOAD_UNSPEC = 0,
NF_FLOW_OFFLOAD_ROUTE,
};
struct flow_offload {
struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
u32 flags;
union {
/* Your private driver data here. */
u32 timeout;
};
struct nf_conn *ct;
u16 flags;
u16 type;
u32 timeout;
struct rcu_head rcu_head;
};
#define NF_FLOW_TIMEOUT (30 * HZ)
@@ -87,10 +113,12 @@ struct nf_flow_route {
} tuple[FLOW_OFFLOAD_DIR_MAX];
};
struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
struct nf_flow_route *route);
struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);
int flow_offload_route_init(struct flow_offload *flow,
const struct nf_flow_route *route);
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
@@ -124,4 +152,22 @@ unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
#define MODULE_ALIAS_NF_FLOWTABLE(family) \
MODULE_ALIAS("nf-flowtable-" __stringify(family))
void nf_flow_offload_add(struct nf_flowtable *flowtable,
struct flow_offload *flow);
void nf_flow_offload_del(struct nf_flowtable *flowtable,
struct flow_offload *flow);
void nf_flow_offload_stats(struct nf_flowtable *flowtable,
struct flow_offload *flow);
void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
struct net_device *dev,
enum flow_block_command cmd);
int nf_flow_rule_route(struct net *net, const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
int nf_flow_table_offload_init(void);
void nf_flow_table_offload_exit(void);
#endif /* _NF_FLOW_TABLE_H */
@@ -1518,6 +1518,7 @@ enum nft_object_attributes {
* @NFTA_FLOWTABLE_HOOK: netfilter hook configuration(NLA_U32)
* @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
* @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
* @NFTA_FLOWTABLE_FLAGS: flags (NLA_U32)
*/
enum nft_flowtable_attributes {
NFTA_FLOWTABLE_UNSPEC,
@@ -1527,6 +1528,7 @@ enum nft_flowtable_attributes {
NFTA_FLOWTABLE_USE,
NFTA_FLOWTABLE_HANDLE,
NFTA_FLOWTABLE_PAD,
NFTA_FLOWTABLE_FLAGS,
__NFTA_FLOWTABLE_MAX
};
#define NFTA_FLOWTABLE_MAX (__NFTA_FLOWTABLE_MAX - 1)
...
@@ -9,6 +9,8 @@
static struct nf_flowtable_type flowtable_ipv4 = {
.family = NFPROTO_IPV4,
.init = nf_flow_table_init,
.setup = nf_flow_table_offload_setup,
.action = nf_flow_rule_route,
.free = nf_flow_table_free,
.hook = nf_flow_offload_ip_hook,
.owner = THIS_MODULE,
...
@@ -10,6 +10,8 @@
static struct nf_flowtable_type flowtable_ipv6 = {
.family = NFPROTO_IPV6,
.init = nf_flow_table_init,
.setup = nf_flow_table_offload_setup,
.action = nf_flow_rule_route,
.free = nf_flow_table_free,
.hook = nf_flow_offload_ipv6_hook,
.owner = THIS_MODULE,
...
@@ -120,7 +120,8 @@ obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o
# flow table infrastructure
obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o
nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o
nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o \
nf_flow_table_offload.o
obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
...
@@ -14,24 +14,15 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
struct flow_offload_entry {
struct flow_offload flow;
struct nf_conn *ct;
struct rcu_head rcu_head;
};
static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);
static void
flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
struct nf_flow_route *route,
enum flow_offload_tuple_dir dir)
{
struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
struct dst_entry *other_dst = route->tuple[!dir].dst;
struct dst_entry *dst = route->tuple[dir].dst;
ft->dir = dir;
@@ -39,12 +30,10 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
case NFPROTO_IPV4:
ft->src_v4 = ctt->src.u3.in;
ft->dst_v4 = ctt->dst.u3.in;
ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
break;
case NFPROTO_IPV6:
ft->src_v6 = ctt->src.u3.in6;
ft->dst_v6 = ctt->dst.u3.in6;
ft->mtu = ip6_dst_mtu_forward(dst);
break;
}
@@ -52,37 +41,24 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
ft->l4proto = ctt->dst.protonum;
ft->src_port = ctt->src.u.tcp.port;
ft->dst_port = ctt->dst.u.tcp.port;
ft->iifidx = other_dst->dev->ifindex;
ft->dst_cache = dst;
}
struct flow_offload *
flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
struct flow_offload_entry *entry;
struct flow_offload *flow;
if (unlikely(nf_ct_is_dying(ct) ||
!atomic_inc_not_zero(&ct->ct_general.use)))
return NULL;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
if (!flow)
goto err_ct_refcnt;
flow = &entry->flow;
if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
goto err_dst_cache_original;
if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
goto err_dst_cache_reply;
entry->ct = ct;
flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);
flow->ct = ct;
flow_offload_fill_dir(flow, ct, FLOW_OFFLOAD_DIR_ORIGINAL);
flow_offload_fill_dir(flow, ct, FLOW_OFFLOAD_DIR_REPLY);
if (ct->status & IPS_SRC_NAT)
flow->flags |= FLOW_OFFLOAD_SNAT;
@@ -91,10 +67,6 @@ flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
return flow;
err_dst_cache_reply:
dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
err_dst_cache_original:
kfree(entry);
err_ct_refcnt:
nf_ct_put(ct);
@@ -102,6 +74,56 @@ flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
static int flow_offload_fill_route(struct flow_offload *flow,
const struct nf_flow_route *route,
enum flow_offload_tuple_dir dir)
{
struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
struct dst_entry *other_dst = route->tuple[!dir].dst;
struct dst_entry *dst = route->tuple[dir].dst;
if (!dst_hold_safe(route->tuple[dir].dst))
return -1;
switch (flow_tuple->l3proto) {
case NFPROTO_IPV4:
flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
break;
case NFPROTO_IPV6:
flow_tuple->mtu = ip6_dst_mtu_forward(dst);
break;
}
flow_tuple->iifidx = other_dst->dev->ifindex;
flow_tuple->dst_cache = dst;
return 0;
}
int flow_offload_route_init(struct flow_offload *flow,
const struct nf_flow_route *route)
{
int err;
err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
if (err < 0)
return err;
err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
if (err < 0)
goto err_route_reply;
flow->type = NF_FLOW_OFFLOAD_ROUTE;
return 0;
err_route_reply:
dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
tcp->state = TCP_CONNTRACK_ESTABLISHED;
@@ -150,17 +172,25 @@ static void flow_offload_fixup_ct(struct nf_conn *ct)
flow_offload_fixup_ct_timeout(ct);
}
void flow_offload_free(struct flow_offload *flow)
static void flow_offload_route_release(struct flow_offload *flow)
{
struct flow_offload_entry *e;
dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
e = container_of(flow, struct flow_offload_entry, flow);
}
void flow_offload_free(struct flow_offload *flow)
{
switch (flow->type) {
case NF_FLOW_OFFLOAD_ROUTE:
flow_offload_route_release(flow);
break;
default:
break;
}
if (flow->flags & FLOW_OFFLOAD_DYING)
nf_ct_delete(e->ct, 0, 0);
nf_ct_put(e->ct);
kfree_rcu(e, rcu_head);
nf_ct_delete(flow->ct, 0, 0);
nf_ct_put(flow->ct);
kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);
@@ -220,6 +250,9 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
return err;
}
if (flow_table->flags & NF_FLOWTABLE_HW_OFFLOAD)
nf_flow_offload_add(flow_table, flow);
return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);
@@ -232,8 +265,6 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
static void flow_offload_del(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
struct flow_offload_entry *e;
rhashtable_remove_fast(&flow_table->rhashtable,
&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
nf_flow_offload_rhash_params);
@@ -241,25 +272,21 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
nf_flow_offload_rhash_params);
e = container_of(flow, struct flow_offload_entry, flow);
clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
if (nf_flow_has_expired(flow))
flow_offload_fixup_ct(e->ct);
flow_offload_fixup_ct(flow->ct);
else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
flow_offload_fixup_ct_timeout(e->ct);
flow_offload_fixup_ct_timeout(flow->ct);
flow_offload_free(flow);
}
void flow_offload_teardown(struct flow_offload *flow)
{
struct flow_offload_entry *e;
flow->flags |= FLOW_OFFLOAD_TEARDOWN;
e = container_of(flow, struct flow_offload_entry, flow);
flow_offload_fixup_ct_state(e->ct);
flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);
@@ -269,7 +296,6 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
{
struct flow_offload_tuple_rhash *tuplehash;
struct flow_offload *flow;
struct flow_offload_entry *e;
int dir;
tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
@@ -282,8 +308,7 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
return NULL;
e = container_of(flow, struct flow_offload_entry, flow);
if (unlikely(nf_ct_is_dying(e->ct)))
if (unlikely(nf_ct_is_dying(flow->ct)))
return NULL;
return tuplehash;
@@ -327,12 +352,21 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
struct nf_flowtable *flow_table = data;
struct flow_offload_entry *e;
e = container_of(flow, struct flow_offload_entry, flow);
if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) ||
(flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
flow_offload_del(flow_table, flow);
if (flow->flags & FLOW_OFFLOAD_HW)
nf_flow_offload_stats(flow_table, flow);
if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
(flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))) {
if (flow->flags & FLOW_OFFLOAD_HW) {
if (!(flow->flags & FLOW_OFFLOAD_HW_DYING))
nf_flow_offload_del(flow_table, flow);
else if (flow->flags & FLOW_OFFLOAD_HW_DEAD)
flow_offload_del(flow_table, flow);
} else {
flow_offload_del(flow_table, flow);
}
}
}
static void nf_flow_offload_work_gc(struct work_struct *work)
@@ -465,6 +499,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
int err;
INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
flow_block_init(&flowtable->flow_block);
err = rhashtable_init(&flowtable->rhashtable,
&nf_flow_offload_rhash_params);
@@ -485,15 +520,13 @@ EXPORT_SYMBOL_GPL(nf_flow_table_init);
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
struct net_device *dev = data;
struct flow_offload_entry *e;
e = container_of(flow, struct flow_offload_entry, flow);
if (!dev) {
flow_offload_teardown(flow);
return;
}
if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
(flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
flow->tuplehash[1].tuple.iifidx == dev->ifindex))
flow_offload_dead(flow);
@@ -502,6 +535,7 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
struct net_device *dev)
{
nf_flow_table_offload_flush(flowtable);
nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
flush_delayed_work(&flowtable->gc_work);
}
@@ -529,5 +563,18 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
static int __init nf_flow_table_module_init(void)
{
return nf_flow_table_offload_init();
}
static void __exit nf_flow_table_module_exit(void)
{
nf_flow_table_offload_exit();
}
module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
@@ -24,6 +24,8 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
static struct nf_flowtable_type flowtable_inet = {
.family = NFPROTO_INET,
.init = nf_flow_table_init,
.setup = nf_flow_table_offload_setup,
.action = nf_flow_rule_route,
.free = nf_flow_table_free,
.hook = nf_flow_offload_inet_hook,
.owner = THIS_MODULE,
...
This diff is collapsed.
@@ -5835,6 +5835,7 @@ static const struct nla_policy nft_flowtable_policy[NFTA_FLOWTABLE_MAX + 1] = {
.len = NFT_NAME_MAXLEN - 1 },
[NFTA_FLOWTABLE_HOOK] = { .type = NLA_NESTED },
[NFTA_FLOWTABLE_HANDLE] = { .type = NLA_U64 },
[NFTA_FLOWTABLE_FLAGS] = { .type = NLA_U32 },
};
struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
@@ -5968,8 +5969,11 @@ static void nft_unregister_flowtable_net_hooks(struct net *net,
{
struct nft_hook *hook;
list_for_each_entry(hook, &flowtable->hook_list, list)
list_for_each_entry(hook, &flowtable->hook_list, list) {
nf_unregister_net_hook(net, &hook->ops);
flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
FLOW_BLOCK_UNBIND);
}
}
static int nft_register_flowtable_net_hooks(struct net *net,
@@ -5991,6 +5995,8 @@ static int nft_register_flowtable_net_hooks(struct net *net,
}
}
flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
FLOW_BLOCK_BIND);
err = nf_register_net_hook(net, &hook->ops);
if (err < 0)
goto err_unregister_net_hooks;
@@ -6006,6 +6012,8 @@ static int nft_register_flowtable_net_hooks(struct net *net,
break;
nf_unregister_net_hook(net, &hook->ops);
flowtable->data.type->setup(&flowtable->data, hook->ops.dev,
FLOW_BLOCK_UNBIND);
list_del_rcu(&hook->list);
kfree_rcu(hook, rcu);
}
@@ -6080,6 +6088,14 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
goto err2;
}
if (nla[NFTA_FLOWTABLE_FLAGS]) {
flowtable->data.flags =
ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
if (flowtable->data.flags & ~NF_FLOWTABLE_HW_OFFLOAD)
goto err3;
}
write_pnet(&flowtable->data.net, net);
flowtable->data.type = type;
err = type->init(&flowtable->data);
if (err < 0)
@@ -6191,7 +6207,8 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) ||
nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
nla_put_be64(skb, NFTA_FLOWTABLE_HANDLE, cpu_to_be64(flowtable->handle),
NFTA_FLOWTABLE_PAD))
NFTA_FLOWTABLE_PAD) ||
nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags)))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, NFTA_FLOWTABLE_HOOK);
...
@@ -115,10 +115,13 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
if (nft_flow_route(pkt, ct, &route, dir) < 0)
goto err_flow_route;
flow = flow_offload_alloc(ct, &route);
flow = flow_offload_alloc(ct);
if (!flow)
goto err_flow_alloc;
if (flow_offload_route_init(flow, &route) < 0)
goto err_flow_add;
if (tcph) {
ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
...