Commit 4a766d49 authored by Pablo Neira Ayuso

netfilter: nf_flow_table_offload: add flow_action_entry_next() and use it

This function retrieves a spare action entry from the array of actions.
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 6408c40c
...@@ -112,13 +112,22 @@ static void flow_offload_mangle(struct flow_action_entry *entry, ...@@ -112,13 +112,22 @@ static void flow_offload_mangle(struct flow_action_entry *entry,
memcpy(&entry->mangle.val, value, sizeof(u32)); memcpy(&entry->mangle.val, value, sizeof(u32));
} }
static inline struct flow_action_entry *
flow_action_entry_next(struct nf_flow_rule *flow_rule)
{
int i = flow_rule->rule->action.num_entries++;
return &flow_rule->rule->action.entries[i];
}
static int flow_offload_eth_src(struct net *net, static int flow_offload_eth_src(struct net *net,
const struct flow_offload *flow, const struct flow_offload *flow,
enum flow_offload_tuple_dir dir, enum flow_offload_tuple_dir dir,
struct flow_action_entry *entry0, struct nf_flow_rule *flow_rule)
struct flow_action_entry *entry1)
{ {
const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple; const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
struct net_device *dev; struct net_device *dev;
u32 mask, val; u32 mask, val;
u16 val16; u16 val16;
...@@ -145,10 +154,11 @@ static int flow_offload_eth_src(struct net *net, ...@@ -145,10 +154,11 @@ static int flow_offload_eth_src(struct net *net,
static int flow_offload_eth_dst(struct net *net, static int flow_offload_eth_dst(struct net *net,
const struct flow_offload *flow, const struct flow_offload *flow,
enum flow_offload_tuple_dir dir, enum flow_offload_tuple_dir dir,
struct flow_action_entry *entry0, struct nf_flow_rule *flow_rule)
struct flow_action_entry *entry1)
{ {
const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple; const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple;
struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
struct neighbour *n; struct neighbour *n;
u32 mask, val; u32 mask, val;
u16 val16; u16 val16;
...@@ -175,8 +185,9 @@ static int flow_offload_eth_dst(struct net *net, ...@@ -175,8 +185,9 @@ static int flow_offload_eth_dst(struct net *net,
static void flow_offload_ipv4_snat(struct net *net, static void flow_offload_ipv4_snat(struct net *net,
const struct flow_offload *flow, const struct flow_offload *flow,
enum flow_offload_tuple_dir dir, enum flow_offload_tuple_dir dir,
struct flow_action_entry *entry) struct nf_flow_rule *flow_rule)
{ {
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask = ~htonl(0xffffffff); u32 mask = ~htonl(0xffffffff);
__be32 addr; __be32 addr;
u32 offset; u32 offset;
...@@ -201,8 +212,9 @@ static void flow_offload_ipv4_snat(struct net *net, ...@@ -201,8 +212,9 @@ static void flow_offload_ipv4_snat(struct net *net,
static void flow_offload_ipv4_dnat(struct net *net, static void flow_offload_ipv4_dnat(struct net *net,
const struct flow_offload *flow, const struct flow_offload *flow,
enum flow_offload_tuple_dir dir, enum flow_offload_tuple_dir dir,
struct flow_action_entry *entry) struct nf_flow_rule *flow_rule)
{ {
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask = ~htonl(0xffffffff); u32 mask = ~htonl(0xffffffff);
__be32 addr; __be32 addr;
u32 offset; u32 offset;
...@@ -246,8 +258,9 @@ static int flow_offload_l4proto(const struct flow_offload *flow) ...@@ -246,8 +258,9 @@ static int flow_offload_l4proto(const struct flow_offload *flow)
static void flow_offload_port_snat(struct net *net, static void flow_offload_port_snat(struct net *net,
const struct flow_offload *flow, const struct flow_offload *flow,
enum flow_offload_tuple_dir dir, enum flow_offload_tuple_dir dir,
struct flow_action_entry *entry) struct nf_flow_rule *flow_rule)
{ {
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask = ~htonl(0xffff0000); u32 mask = ~htonl(0xffff0000);
__be16 port; __be16 port;
u32 offset; u32 offset;
...@@ -272,8 +285,9 @@ static void flow_offload_port_snat(struct net *net, ...@@ -272,8 +285,9 @@ static void flow_offload_port_snat(struct net *net,
static void flow_offload_port_dnat(struct net *net, static void flow_offload_port_dnat(struct net *net,
const struct flow_offload *flow, const struct flow_offload *flow,
enum flow_offload_tuple_dir dir, enum flow_offload_tuple_dir dir,
struct flow_action_entry *entry) struct nf_flow_rule *flow_rule)
{ {
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
u32 mask = ~htonl(0xffff); u32 mask = ~htonl(0xffff);
__be16 port; __be16 port;
u32 offset; u32 offset;
...@@ -297,9 +311,10 @@ static void flow_offload_port_dnat(struct net *net, ...@@ -297,9 +311,10 @@ static void flow_offload_port_dnat(struct net *net,
static void flow_offload_ipv4_checksum(struct net *net, static void flow_offload_ipv4_checksum(struct net *net,
const struct flow_offload *flow, const struct flow_offload *flow,
struct flow_action_entry *entry) struct nf_flow_rule *flow_rule)
{ {
u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto; u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
entry->id = FLOW_ACTION_CSUM; entry->id = FLOW_ACTION_CSUM;
entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR; entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;
...@@ -316,8 +331,9 @@ static void flow_offload_ipv4_checksum(struct net *net, ...@@ -316,8 +331,9 @@ static void flow_offload_ipv4_checksum(struct net *net,
static void flow_offload_redirect(const struct flow_offload *flow, static void flow_offload_redirect(const struct flow_offload *flow,
enum flow_offload_tuple_dir dir, enum flow_offload_tuple_dir dir,
struct flow_action_entry *entry) struct nf_flow_rule *flow_rule)
{ {
struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
struct rtable *rt; struct rtable *rt;
rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache; rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
...@@ -330,39 +346,25 @@ int nf_flow_rule_route(struct net *net, const struct flow_offload *flow, ...@@ -330,39 +346,25 @@ int nf_flow_rule_route(struct net *net, const struct flow_offload *flow,
enum flow_offload_tuple_dir dir, enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule) struct nf_flow_rule *flow_rule)
{ {
int i; if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
if (flow_offload_eth_src(net, flow, dir,
&flow_rule->rule->action.entries[0],
&flow_rule->rule->action.entries[1]) < 0)
return -1; return -1;
if (flow_offload_eth_dst(net, flow, dir,
&flow_rule->rule->action.entries[2],
&flow_rule->rule->action.entries[3]) < 0)
return -1;
i = 4;
if (flow->flags & FLOW_OFFLOAD_SNAT) { if (flow->flags & FLOW_OFFLOAD_SNAT) {
flow_offload_ipv4_snat(net, flow, dir, flow_offload_ipv4_snat(net, flow, dir, flow_rule);
&flow_rule->rule->action.entries[i++]); flow_offload_port_snat(net, flow, dir, flow_rule);
flow_offload_port_snat(net, flow, dir,
&flow_rule->rule->action.entries[i++]);
} }
if (flow->flags & FLOW_OFFLOAD_DNAT) { if (flow->flags & FLOW_OFFLOAD_DNAT) {
flow_offload_ipv4_dnat(net, flow, dir, flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
&flow_rule->rule->action.entries[i++]); flow_offload_port_dnat(net, flow, dir, flow_rule);
flow_offload_port_dnat(net, flow, dir,
&flow_rule->rule->action.entries[i++]);
} }
if (flow->flags & FLOW_OFFLOAD_SNAT || if (flow->flags & FLOW_OFFLOAD_SNAT ||
flow->flags & FLOW_OFFLOAD_DNAT) flow->flags & FLOW_OFFLOAD_DNAT)
flow_offload_ipv4_checksum(net, flow, flow_offload_ipv4_checksum(net, flow, flow_rule);
&flow_rule->rule->action.entries[i++]);
flow_offload_redirect(flow, dir, &flow_rule->rule->action.entries[i++]); flow_offload_redirect(flow, dir, flow_rule);
return i; return 0;
} }
EXPORT_SYMBOL_GPL(nf_flow_rule_route); EXPORT_SYMBOL_GPL(nf_flow_rule_route);
...@@ -375,7 +377,7 @@ nf_flow_offload_rule_alloc(struct net *net, ...@@ -375,7 +377,7 @@ nf_flow_offload_rule_alloc(struct net *net,
const struct flow_offload *flow = offload->flow; const struct flow_offload *flow = offload->flow;
const struct flow_offload_tuple *tuple; const struct flow_offload_tuple *tuple;
struct nf_flow_rule *flow_rule; struct nf_flow_rule *flow_rule;
int err = -ENOMEM, num_actions; int err = -ENOMEM;
flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL); flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
if (!flow_rule) if (!flow_rule)
...@@ -394,12 +396,10 @@ nf_flow_offload_rule_alloc(struct net *net, ...@@ -394,12 +396,10 @@ nf_flow_offload_rule_alloc(struct net *net,
if (err < 0) if (err < 0)
goto err_flow_match; goto err_flow_match;
num_actions = flowtable->type->action(net, flow, dir, flow_rule); flow_rule->rule->action.num_entries = 0;
if (num_actions < 0) if (flowtable->type->action(net, flow, dir, flow_rule) < 0)
goto err_flow_match; goto err_flow_match;
flow_rule->rule->action.num_entries = num_actions;
return flow_rule; return flow_rule;
err_flow_match: err_flow_match:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment