Commit 21c4c60b authored by Rahul Lakkireddy, committed by David S. Miller

cxgb4: add TC-MATCHALL classifier ingress offload

Add TC-MATCHALL classifier ingress offload support. The same actions
supported by existing TC-FLOWER offload can be applied to all incoming
traffic on the underlying interface.

Ensure the rule priority doesn't conflict with existing rules in the
TCAM. Only 1 ingress matchall rule can be active at a time on the
underlying interface.

v5:
- No change.

v4:
- Added check to ensure the matchall rule's prio doesn't conflict with
  other rules in TCAM.
- Added logic to fill default mask for VIID, if none has been
  provided, to prevent conflict with duplicate VIID rules.
- Used existing variables in private structure to fill VIID info,
  instead of extracting the info manually.

v3:
- No change.

v2:
- Removed logic to fetch free index from end of TCAM. Must maintain
  same ordering as in kernel.
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 41ec03e5
...@@ -872,6 +872,12 @@ static void fill_default_mask(struct ch_filter_specification *fs) ...@@ -872,6 +872,12 @@ static void fill_default_mask(struct ch_filter_specification *fs)
fs->mask.tos |= ~0; fs->mask.tos |= ~0;
if (fs->val.proto && !fs->mask.proto) if (fs->val.proto && !fs->mask.proto)
fs->mask.proto |= ~0; fs->mask.proto |= ~0;
if (fs->val.pfvf_vld && !fs->mask.pfvf_vld)
fs->mask.pfvf_vld |= ~0;
if (fs->val.pf && !fs->mask.pf)
fs->mask.pf |= ~0;
if (fs->val.vf && !fs->mask.vf)
fs->mask.vf |= ~0;
for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) { for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
lip |= fs->val.lip[i]; lip |= fs->val.lip[i];
......
...@@ -3236,7 +3236,8 @@ static int cxgb_setup_tc_cls_u32(struct net_device *dev, ...@@ -3236,7 +3236,8 @@ static int cxgb_setup_tc_cls_u32(struct net_device *dev,
} }
static int cxgb_setup_tc_matchall(struct net_device *dev, static int cxgb_setup_tc_matchall(struct net_device *dev,
struct tc_cls_matchall_offload *cls_matchall) struct tc_cls_matchall_offload *cls_matchall,
bool ingress)
{ {
struct adapter *adap = netdev2adap(dev); struct adapter *adap = netdev2adap(dev);
...@@ -3245,9 +3246,13 @@ static int cxgb_setup_tc_matchall(struct net_device *dev, ...@@ -3245,9 +3246,13 @@ static int cxgb_setup_tc_matchall(struct net_device *dev,
switch (cls_matchall->command) { switch (cls_matchall->command) {
case TC_CLSMATCHALL_REPLACE: case TC_CLSMATCHALL_REPLACE:
return cxgb4_tc_matchall_replace(dev, cls_matchall); return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
case TC_CLSMATCHALL_DESTROY: case TC_CLSMATCHALL_DESTROY:
return cxgb4_tc_matchall_destroy(dev, cls_matchall); return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
case TC_CLSMATCHALL_STATS:
if (ingress)
return cxgb4_tc_matchall_stats(dev, cls_matchall);
break;
default: default:
break; break;
} }
...@@ -3277,6 +3282,8 @@ static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type, ...@@ -3277,6 +3282,8 @@ static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
return cxgb_setup_tc_cls_u32(dev, type_data); return cxgb_setup_tc_cls_u32(dev, type_data);
case TC_SETUP_CLSFLOWER: case TC_SETUP_CLSFLOWER:
return cxgb_setup_tc_flower(dev, type_data); return cxgb_setup_tc_flower(dev, type_data);
case TC_SETUP_CLSMATCHALL:
return cxgb_setup_tc_matchall(dev, type_data, true);
default: default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
...@@ -3301,7 +3308,7 @@ static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type, ...@@ -3301,7 +3308,7 @@ static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
switch (type) { switch (type) {
case TC_SETUP_CLSMATCHALL: case TC_SETUP_CLSMATCHALL:
return cxgb_setup_tc_matchall(dev, type_data); return cxgb_setup_tc_matchall(dev, type_data, false);
default: default:
break; break;
} }
......
...@@ -378,15 +378,14 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, ...@@ -378,15 +378,14 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
} }
} }
static void cxgb4_process_flow_actions(struct net_device *in, void cxgb4_process_flow_actions(struct net_device *in,
struct flow_cls_offload *cls, struct flow_action *actions,
struct ch_filter_specification *fs) struct ch_filter_specification *fs)
{ {
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_action_entry *act; struct flow_action_entry *act;
int i; int i;
flow_action_for_each(i, act, &rule->action) { flow_action_for_each(i, act, actions) {
switch (act->id) { switch (act->id) {
case FLOW_ACTION_ACCEPT: case FLOW_ACTION_ACCEPT:
fs->action = FILTER_PASS; fs->action = FILTER_PASS;
...@@ -544,17 +543,16 @@ static bool valid_pedit_action(struct net_device *dev, ...@@ -544,17 +543,16 @@ static bool valid_pedit_action(struct net_device *dev,
return true; return true;
} }
static int cxgb4_validate_flow_actions(struct net_device *dev, int cxgb4_validate_flow_actions(struct net_device *dev,
struct flow_cls_offload *cls) struct flow_action *actions)
{ {
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_action_entry *act; struct flow_action_entry *act;
bool act_redir = false; bool act_redir = false;
bool act_pedit = false; bool act_pedit = false;
bool act_vlan = false; bool act_vlan = false;
int i; int i;
flow_action_for_each(i, act, &rule->action) { flow_action_for_each(i, act, actions) {
switch (act->id) { switch (act->id) {
case FLOW_ACTION_ACCEPT: case FLOW_ACTION_ACCEPT:
case FLOW_ACTION_DROP: case FLOW_ACTION_DROP:
...@@ -636,6 +634,7 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, ...@@ -636,6 +634,7 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
int cxgb4_tc_flower_replace(struct net_device *dev, int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls) struct flow_cls_offload *cls)
{ {
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct netlink_ext_ack *extack = cls->common.extack; struct netlink_ext_ack *extack = cls->common.extack;
struct adapter *adap = netdev2adap(dev); struct adapter *adap = netdev2adap(dev);
struct ch_tc_flower_entry *ch_flower; struct ch_tc_flower_entry *ch_flower;
...@@ -643,7 +642,7 @@ int cxgb4_tc_flower_replace(struct net_device *dev, ...@@ -643,7 +642,7 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
struct filter_ctx ctx; struct filter_ctx ctx;
int fidx, ret; int fidx, ret;
if (cxgb4_validate_flow_actions(dev, cls)) if (cxgb4_validate_flow_actions(dev, &rule->action))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (cxgb4_validate_flow_match(dev, cls)) if (cxgb4_validate_flow_match(dev, cls))
...@@ -658,7 +657,7 @@ int cxgb4_tc_flower_replace(struct net_device *dev, ...@@ -658,7 +657,7 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
fs = &ch_flower->fs; fs = &ch_flower->fs;
fs->hitcnts = 1; fs->hitcnts = 1;
cxgb4_process_flow_match(dev, cls, fs); cxgb4_process_flow_match(dev, cls, fs);
cxgb4_process_flow_actions(dev, cls, fs); cxgb4_process_flow_actions(dev, &rule->action, fs);
fs->hash = is_filter_exact_match(adap, fs); fs->hash = is_filter_exact_match(adap, fs);
if (fs->hash) { if (fs->hash) {
......
...@@ -108,6 +108,12 @@ struct ch_tc_pedit_fields { ...@@ -108,6 +108,12 @@ struct ch_tc_pedit_fields {
#define PEDIT_TCP_SPORT_DPORT 0x0 #define PEDIT_TCP_SPORT_DPORT 0x0
#define PEDIT_UDP_SPORT_DPORT 0x0 #define PEDIT_UDP_SPORT_DPORT 0x0
void cxgb4_process_flow_actions(struct net_device *in,
struct flow_action *actions,
struct ch_filter_specification *fs);
int cxgb4_validate_flow_actions(struct net_device *dev,
struct flow_action *actions);
int cxgb4_tc_flower_replace(struct net_device *dev, int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls); struct flow_cls_offload *cls);
int cxgb4_tc_flower_destroy(struct net_device *dev, int cxgb4_tc_flower_destroy(struct net_device *dev,
......
...@@ -4,6 +4,9 @@ ...@@ -4,6 +4,9 @@
#include "cxgb4.h" #include "cxgb4.h"
#include "cxgb4_tc_matchall.h" #include "cxgb4_tc_matchall.h"
#include "sched.h" #include "sched.h"
#include "cxgb4_uld.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"
static int cxgb4_matchall_egress_validate(struct net_device *dev, static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct tc_cls_matchall_offload *cls) struct tc_cls_matchall_offload *cls)
...@@ -118,8 +121,85 @@ static void cxgb4_matchall_free_tc(struct net_device *dev) ...@@ -118,8 +121,85 @@ static void cxgb4_matchall_free_tc(struct net_device *dev)
tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED; tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
} }
/* Allocate and program one LETCAM wildcard filter that matches all
 * ingress traffic destined to this port's virtual interface (VI) and
 * applies the TC-MATCHALL rule's actions to it.
 *
 * @dev: underlying net device the rule is attached to.
 * @cls: TC matchall offload request (priority, cookie, actions).
 *
 * Returns 0 on success, -ENOMEM when no suitable TCAM slot is free or
 * the priority conflicts with existing rules, or the error from
 * cxgb4_set_filter().
 */
static int cxgb4_matchall_alloc_filter(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct ch_filter_specification *fs;
	int ret, fidx;

	/* Note that TC uses prio 0 to indicate stack to generate
	 * automatic prio and hence doesn't pass prio 0 to driver.
	 * However, the hardware TCAM index starts from 0. Hence, the
	 * -1 here. 1 slot is enough to create a wildcard matchall
	 * VIID rule.
	 */
	if (cls->common.prio <= adap->tids.nftids)
		fidx = cls->common.prio - 1;
	else
		fidx = cxgb4_get_free_ftid(dev, PF_INET);

	/* Only insert MATCHALL rule if its priority doesn't conflict
	 * with existing rules in the LETCAM.
	 */
	if (fidx < 0 ||
	    !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	fs = &tc_port_matchall->ingress.fs;
	memset(fs, 0, sizeof(*fs));

	/* tc_prio/tc_cookie identify this rule on later destroy and
	 * prio-conflict checks; hitcnts enables hardware hit counters
	 * so the stats callback can report packet/byte counts.
	 */
	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	fs->hitcnts = 1;

	/* Match on this function's VI (PF/VF), so the otherwise
	 * wildcard rule only catches traffic for the underlying
	 * interface.  Masks are filled in by fill_default_mask().
	 */
	fs->val.pfvf_vld = 1;
	fs->val.pf = adap->pf;
	fs->val.vf = pi->vin;

	cxgb4_process_flow_actions(dev, &cls->rule->action, fs);

	ret = cxgb4_set_filter(dev, fidx, fs);
	if (ret)
		return ret;

	/* Remember the hardware filter index and mark the ingress
	 * matchall slot busy; only one such rule may be active.
	 */
	tc_port_matchall->ingress.tid = fidx;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;
}
static int cxgb4_matchall_free_filter(struct net_device *dev)
{
struct cxgb4_tc_port_matchall *tc_port_matchall;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
int ret;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid,
&tc_port_matchall->ingress.fs);
if (ret)
return ret;
tc_port_matchall->ingress.packets = 0;
tc_port_matchall->ingress.bytes = 0;
tc_port_matchall->ingress.last_used = 0;
tc_port_matchall->ingress.tid = 0;
tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
return 0;
}
int cxgb4_tc_matchall_replace(struct net_device *dev, int cxgb4_tc_matchall_replace(struct net_device *dev,
struct tc_cls_matchall_offload *cls_matchall) struct tc_cls_matchall_offload *cls_matchall,
bool ingress)
{ {
struct netlink_ext_ack *extack = cls_matchall->common.extack; struct netlink_ext_ack *extack = cls_matchall->common.extack;
struct cxgb4_tc_port_matchall *tc_port_matchall; struct cxgb4_tc_port_matchall *tc_port_matchall;
...@@ -128,6 +208,22 @@ int cxgb4_tc_matchall_replace(struct net_device *dev, ...@@ -128,6 +208,22 @@ int cxgb4_tc_matchall_replace(struct net_device *dev,
int ret; int ret;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
if (ingress) {
if (tc_port_matchall->ingress.state ==
CXGB4_MATCHALL_STATE_ENABLED) {
NL_SET_ERR_MSG_MOD(extack,
"Only 1 Ingress MATCHALL can be offloaded");
return -ENOMEM;
}
ret = cxgb4_validate_flow_actions(dev,
&cls_matchall->rule->action);
if (ret)
return ret;
return cxgb4_matchall_alloc_filter(dev, cls_matchall);
}
if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) { if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
NL_SET_ERR_MSG_MOD(extack, NL_SET_ERR_MSG_MOD(extack,
"Only 1 Egress MATCHALL can be offloaded"); "Only 1 Egress MATCHALL can be offloaded");
...@@ -142,13 +238,22 @@ int cxgb4_tc_matchall_replace(struct net_device *dev, ...@@ -142,13 +238,22 @@ int cxgb4_tc_matchall_replace(struct net_device *dev,
} }
int cxgb4_tc_matchall_destroy(struct net_device *dev, int cxgb4_tc_matchall_destroy(struct net_device *dev,
struct tc_cls_matchall_offload *cls_matchall) struct tc_cls_matchall_offload *cls_matchall,
bool ingress)
{ {
struct cxgb4_tc_port_matchall *tc_port_matchall; struct cxgb4_tc_port_matchall *tc_port_matchall;
struct port_info *pi = netdev2pinfo(dev); struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev); struct adapter *adap = netdev2adap(dev);
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
if (ingress) {
if (cls_matchall->cookie !=
tc_port_matchall->ingress.fs.tc_cookie)
return -ENOENT;
return cxgb4_matchall_free_filter(dev);
}
if (cls_matchall->cookie != tc_port_matchall->egress.cookie) if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
return -ENOENT; return -ENOENT;
...@@ -156,6 +261,39 @@ int cxgb4_tc_matchall_destroy(struct net_device *dev, ...@@ -156,6 +261,39 @@ int cxgb4_tc_matchall_destroy(struct net_device *dev,
return 0; return 0;
} }
/* Report hardware hit counters for the offloaded ingress matchall
 * rule back to TC.
 *
 * Only the delta since the previous query is pushed via
 * flow_stats_update(); the cached ingress.packets/ingress.bytes track
 * the totals already reported, and ingress.last_used records when the
 * counters last advanced.
 *
 * Returns 0 on success, -ENOENT if no ingress matchall rule is
 * offloaded, or the error from cxgb4_get_filter_counters().
 */
int cxgb4_tc_matchall_stats(struct net_device *dev,
			    struct tc_cls_matchall_offload *cls_matchall)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u64 packets, bytes;
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
		return -ENOENT;

	/* Fetch the current absolute counters from hardware. */
	ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid,
					&packets, &bytes,
					tc_port_matchall->ingress.fs.hash);
	if (ret)
		return ret;

	/* Only push an update when the counters have moved, and report
	 * the increment relative to the last reported totals.
	 */
	if (tc_port_matchall->ingress.packets != packets) {
		flow_stats_update(&cls_matchall->stats,
				  bytes - tc_port_matchall->ingress.bytes,
				  packets - tc_port_matchall->ingress.packets,
				  tc_port_matchall->ingress.last_used);

		tc_port_matchall->ingress.packets = packets;
		tc_port_matchall->ingress.bytes = bytes;
		tc_port_matchall->ingress.last_used = jiffies;
	}

	return 0;
}
static void cxgb4_matchall_disable_offload(struct net_device *dev) static void cxgb4_matchall_disable_offload(struct net_device *dev)
{ {
struct cxgb4_tc_port_matchall *tc_port_matchall; struct cxgb4_tc_port_matchall *tc_port_matchall;
...@@ -165,6 +303,9 @@ static void cxgb4_matchall_disable_offload(struct net_device *dev) ...@@ -165,6 +303,9 @@ static void cxgb4_matchall_disable_offload(struct net_device *dev)
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
cxgb4_matchall_free_tc(dev); cxgb4_matchall_free_tc(dev);
if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
cxgb4_matchall_free_filter(dev);
} }
int cxgb4_init_tc_matchall(struct adapter *adap) int cxgb4_init_tc_matchall(struct adapter *adap)
......
...@@ -17,8 +17,18 @@ struct cxgb4_matchall_egress_entry { ...@@ -17,8 +17,18 @@ struct cxgb4_matchall_egress_entry {
u64 cookie; /* Used to identify the MATCHALL rule offloaded */ u64 cookie; /* Used to identify the MATCHALL rule offloaded */
}; };
/* Per-port bookkeeping for the single ingress TC-MATCHALL rule that
 * can be offloaded to the hardware LETCAM at a time.
 */
struct cxgb4_matchall_ingress_entry {
	enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
	u32 tid; /* Index to hardware filter entry */
	struct ch_filter_specification fs; /* Filter entry */
	u64 bytes; /* # of bytes hitting the filter */
	u64 packets; /* # of packets hitting the filter */
	u64 last_used; /* Last updated jiffies time */
};
struct cxgb4_tc_port_matchall { struct cxgb4_tc_port_matchall {
struct cxgb4_matchall_egress_entry egress; /* Egress offload info */ struct cxgb4_matchall_egress_entry egress; /* Egress offload info */
struct cxgb4_matchall_ingress_entry ingress; /* Ingress offload info */
}; };
struct cxgb4_tc_matchall { struct cxgb4_tc_matchall {
...@@ -26,9 +36,13 @@ struct cxgb4_tc_matchall { ...@@ -26,9 +36,13 @@ struct cxgb4_tc_matchall {
}; };
int cxgb4_tc_matchall_replace(struct net_device *dev, int cxgb4_tc_matchall_replace(struct net_device *dev,
struct tc_cls_matchall_offload *cls_matchall); struct tc_cls_matchall_offload *cls_matchall,
bool ingress);
int cxgb4_tc_matchall_destroy(struct net_device *dev, int cxgb4_tc_matchall_destroy(struct net_device *dev,
struct tc_cls_matchall_offload *cls_matchall); struct tc_cls_matchall_offload *cls_matchall,
bool ingress);
int cxgb4_tc_matchall_stats(struct net_device *dev,
struct tc_cls_matchall_offload *cls_matchall);
int cxgb4_init_tc_matchall(struct adapter *adap); int cxgb4_init_tc_matchall(struct adapter *adap);
void cxgb4_cleanup_tc_matchall(struct adapter *adap); void cxgb4_cleanup_tc_matchall(struct adapter *adap);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment