Commit 25a443f7, authored by John Hurley, committed by David S. Miller

net: sched: allow indirect blocks to bind to clsact in TC

When a device is bound to a clsact qdisc, bind events are triggered to
registered drivers for both ingress and egress. However, if a driver
registers to such a device using the indirect block routines then it is
assumed that it is only interested in ingress offload and so only replays
ingress bind/unbind messages.

The NFP driver supports the offload of some egress filters when
registering to a block with qdisc of type clsact. However, on unregister,
if the block is still active, it will not receive an unbind egress
notification which can prevent proper cleanup of other registered
callbacks.

Modify the indirect block callback command in TC to send messages of
ingress and/or egress bind depending on the qdisc in use. NFP currently
supports egress offload for TC flower offload so the changes are only
added to TC.

Fixes: 4d12ba42 ("nfp: flower: allow offloading of matches on 'internal' ports")
Signed-off-by: John Hurley <john.hurley@netronome.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dbad3408
...@@ -626,15 +626,15 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held) ...@@ -626,15 +626,15 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
static int tcf_block_setup(struct tcf_block *block, static int tcf_block_setup(struct tcf_block *block,
struct flow_block_offload *bo); struct flow_block_offload *bo);
static void tc_indr_block_ing_cmd(struct net_device *dev, static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
struct tcf_block *block, flow_indr_block_bind_cb_t *cb, void *cb_priv,
flow_indr_block_bind_cb_t *cb, enum flow_block_command command, bool ingress)
void *cb_priv,
enum flow_block_command command)
{ {
struct flow_block_offload bo = { struct flow_block_offload bo = {
.command = command, .command = command,
.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS, .binder_type = ingress ?
FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
.net = dev_net(dev), .net = dev_net(dev),
.block_shared = tcf_block_non_null_shared(block), .block_shared = tcf_block_non_null_shared(block),
}; };
...@@ -652,9 +652,10 @@ static void tc_indr_block_ing_cmd(struct net_device *dev, ...@@ -652,9 +652,10 @@ static void tc_indr_block_ing_cmd(struct net_device *dev,
up_write(&block->cb_lock); up_write(&block->cb_lock);
} }
static struct tcf_block *tc_dev_ingress_block(struct net_device *dev) static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{ {
const struct Qdisc_class_ops *cops; const struct Qdisc_class_ops *cops;
const struct Qdisc_ops *ops;
struct Qdisc *qdisc; struct Qdisc *qdisc;
if (!dev_ingress_queue(dev)) if (!dev_ingress_queue(dev))
...@@ -664,24 +665,37 @@ static struct tcf_block *tc_dev_ingress_block(struct net_device *dev) ...@@ -664,24 +665,37 @@ static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
if (!qdisc) if (!qdisc)
return NULL; return NULL;
cops = qdisc->ops->cl_ops; ops = qdisc->ops;
if (!ops)
return NULL;
if (!ingress && !strcmp("ingress", ops->id))
return NULL;
cops = ops->cl_ops;
if (!cops) if (!cops)
return NULL; return NULL;
if (!cops->tcf_block) if (!cops->tcf_block)
return NULL; return NULL;
return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL); return cops->tcf_block(qdisc,
ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
NULL);
} }
/* Indirect-block callback for TC: replay a block bind/unbind command to
 * @cb for both the ingress and the egress block of @dev.  A clsact qdisc
 * exposes both binder types, so drivers registered via the indirect block
 * routines must also see egress bind/unbind events (the defect this commit
 * fixes); tc_dev_block() returns NULL for a side that has no block, which
 * tc_indr_block_cmd() is expected to tolerate.
 */
static void tc_indr_block_get_and_cmd(struct net_device *dev,
				      flow_indr_block_bind_cb_t *cb,
				      void *cb_priv,
				      enum flow_block_command command)
{
	struct tcf_block *block;

	/* Ingress side first, then egress — each lookup is independent. */
	block = tc_dev_block(dev, true);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

	block = tc_dev_block(dev, false);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}
static void tc_indr_block_call(struct tcf_block *block, static void tc_indr_block_call(struct tcf_block *block,
...@@ -3626,9 +3640,9 @@ static struct pernet_operations tcf_net_ops = { ...@@ -3626,9 +3640,9 @@ static struct pernet_operations tcf_net_ops = {
.size = sizeof(struct tcf_net), .size = sizeof(struct tcf_net),
}; };
static struct flow_indr_block_entry block_ing_entry = { static struct flow_indr_block_entry block_entry = {
.cb = tc_indr_block_get_and_ing_cmd, .cb = tc_indr_block_get_and_cmd,
.list = LIST_HEAD_INIT(block_ing_entry.list), .list = LIST_HEAD_INIT(block_entry.list),
}; };
static int __init tc_filter_init(void) static int __init tc_filter_init(void)
...@@ -3643,7 +3657,7 @@ static int __init tc_filter_init(void) ...@@ -3643,7 +3657,7 @@ static int __init tc_filter_init(void)
if (err) if (err)
goto err_register_pernet_subsys; goto err_register_pernet_subsys;
flow_indr_add_block_cb(&block_ing_entry); flow_indr_add_block_cb(&block_entry);
rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
RTNL_FLAG_DOIT_UNLOCKED); RTNL_FLAG_DOIT_UNLOCKED);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment