Commit 2b68d659, authored by Oz Shlomo, committed by Paolo Abeni

net/mlx5e: TC, support per action stats

Extend the action stats callback implementation to update stats for actions
that are associated with hw counters.
Note that the callback may be called from tc action utility or from tc
flower. Both apis expect the driver to return the stats difference from
the last update. As such, query the raw counter value and maintain
the diff from the last api call in the tc layer, instead of the fs_core
layer.
Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent d13674b1
...@@ -589,7 +589,7 @@ mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv, ...@@ -589,7 +589,7 @@ mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv,
act = mlx5e_tc_act_get(fl_act->id, ns_type); act = mlx5e_tc_act_get(fl_act->id, ns_type);
if (!act || !act->stats_action) if (!act || !act->stats_action)
return -EOPNOTSUPP; return mlx5e_tc_fill_action_stats(priv, fl_act);
return act->stats_action(priv, fl_act); return act->stats_action(priv, fl_act);
} }
......
...@@ -102,6 +102,9 @@ mlx5e_tc_act_stats_del_flow(struct mlx5e_tc_act_stats_handle *handle, ...@@ -102,6 +102,9 @@ mlx5e_tc_act_stats_del_flow(struct mlx5e_tc_act_stats_handle *handle,
struct mlx5e_tc_act_stats *act_stats; struct mlx5e_tc_act_stats *act_stats;
int i; int i;
if (!flow_flag_test(flow, USE_ACT_STATS))
return;
list_for_each_entry(attr, &flow->attrs, list) { list_for_each_entry(attr, &flow->attrs, list) {
for (i = 0; i < attr->tc_act_cookies_count; i++) { for (i = 0; i < attr->tc_act_cookies_count; i++) {
struct rhashtable *ht = &handle->ht; struct rhashtable *ht = &handle->ht;
...@@ -130,6 +133,9 @@ mlx5e_tc_act_stats_add_flow(struct mlx5e_tc_act_stats_handle *handle, ...@@ -130,6 +133,9 @@ mlx5e_tc_act_stats_add_flow(struct mlx5e_tc_act_stats_handle *handle,
int err; int err;
int i; int i;
if (!flow_flag_test(flow, USE_ACT_STATS))
return 0;
list_for_each_entry(attr, &flow->attrs, list) { list_for_each_entry(attr, &flow->attrs, list) {
if (attr->counter) if (attr->counter)
curr_counter = attr->counter; curr_counter = attr->counter;
...@@ -151,3 +157,41 @@ mlx5e_tc_act_stats_add_flow(struct mlx5e_tc_act_stats_handle *handle, ...@@ -151,3 +157,41 @@ mlx5e_tc_act_stats_add_flow(struct mlx5e_tc_act_stats_handle *handle,
mlx5e_tc_act_stats_del_flow(handle, flow); mlx5e_tc_act_stats_del_flow(handle, flow);
return err; return err;
} }
/* Fill @fl_act->stats for the action identified by @fl_act->cookie.
 *
 * Looks up the per-action stats entry by cookie in the handle's
 * rhashtable, reads the raw cached counter value, and reports the
 * delta since the previous call (the "last" values are kept here in
 * the TC layer, not in fs_core).
 *
 * Returns 0 on success, -ENOENT if no entry matches the cookie.
 */
int
mlx5e_tc_act_stats_fill_stats(struct mlx5e_tc_act_stats_handle *handle,
			      struct flow_offload_action *fl_act)
{
	u64 raw_bytes, raw_pkts, lastused;
	struct mlx5e_tc_act_stats *entry;
	struct mlx5e_tc_act_stats key;
	int err = -ENOENT;

	key.tc_act_cookie = fl_act->cookie;

	rcu_read_lock();
	entry = rhashtable_lookup(&handle->ht, &key, act_counters_ht_params);
	if (!entry)
		goto out_unlock;

	/* Raw query: the cached counter is not reset, so compute the
	 * difference from the values recorded on the previous call.
	 */
	mlx5_fc_query_cached_raw(entry->counter,
				 &raw_bytes, &raw_pkts, &lastused);

	flow_stats_update(&fl_act->stats,
			  raw_bytes - entry->lastbytes,
			  raw_pkts - entry->lastpackets,
			  0, lastused, FLOW_ACTION_HW_STATS_DELAYED);

	entry->lastpackets = raw_pkts;
	entry->lastbytes = raw_bytes;
	err = 0;

out_unlock:
	rcu_read_unlock();
	return err;
}
...@@ -20,4 +20,8 @@ void ...@@ -20,4 +20,8 @@ void
mlx5e_tc_act_stats_del_flow(struct mlx5e_tc_act_stats_handle *handle, mlx5e_tc_act_stats_del_flow(struct mlx5e_tc_act_stats_handle *handle,
struct mlx5e_tc_flow *flow); struct mlx5e_tc_flow *flow);
int
mlx5e_tc_act_stats_fill_stats(struct mlx5e_tc_act_stats_handle *handle,
struct flow_offload_action *fl_act);
#endif /* __MLX5_EN_ACT_STATS_H__ */ #endif /* __MLX5_EN_ACT_STATS_H__ */
...@@ -30,6 +30,7 @@ enum { ...@@ -30,6 +30,7 @@ enum {
MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9, MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9,
MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10, MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10,
MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 11, MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 11,
MLX5E_TC_FLOW_FLAG_USE_ACT_STATS = MLX5E_TC_FLOW_BASE + 12,
}; };
struct mlx5e_tc_flow_parse_attr { struct mlx5e_tc_flow_parse_attr {
......
...@@ -4117,6 +4117,7 @@ parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act, ...@@ -4117,6 +4117,7 @@ parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
/* branching action requires its own counter */ /* branching action requires its own counter */
attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_flag_set(flow, USE_ACT_STATS);
return 0; return 0;
...@@ -4967,6 +4968,12 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, ...@@ -4967,6 +4968,12 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
return err; return err;
} }
/* Driver entry point for per-action stats queries coming from the TC
 * action utility / flower offload path. Delegates to the act-stats
 * module, which reports the delta since the previous query.
 */
int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
			       struct flow_offload_action *fl_act)
{
	struct mlx5e_tc_act_stats_handle *handle;

	handle = get_act_stats_handle(priv);
	return mlx5e_tc_act_stats_fill_stats(handle, fl_act);
}
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags) struct flow_cls_offload *f, unsigned long flags)
{ {
...@@ -4993,12 +5000,16 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, ...@@ -4993,12 +5000,16 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
} }
if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) { if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
if (flow_flag_test(flow, USE_ACT_STATS)) {
f->use_act_stats = true;
} else {
counter = mlx5e_tc_get_counter(flow); counter = mlx5e_tc_get_counter(flow);
if (!counter) if (!counter)
goto errout; goto errout;
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
} }
}
/* Under multipath it's possible for one rule to be currently /* Under multipath it's possible for one rule to be currently
* un-offloaded while the other rule is offloaded. * un-offloaded while the other rule is offloaded.
...@@ -5013,6 +5024,9 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, ...@@ -5013,6 +5024,9 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
u64 packets2; u64 packets2;
u64 lastuse2; u64 lastuse2;
if (flow_flag_test(flow, USE_ACT_STATS)) {
f->use_act_stats = true;
} else {
counter = mlx5e_tc_get_counter(flow->peer_flow); counter = mlx5e_tc_get_counter(flow->peer_flow);
if (!counter) if (!counter)
goto no_peer_counter; goto no_peer_counter;
...@@ -5022,6 +5036,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, ...@@ -5022,6 +5036,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
packets += packets2; packets += packets2;
lastuse = max_t(u64, lastuse, lastuse2); lastuse = max_t(u64, lastuse, lastuse2);
} }
}
no_peer_counter: no_peer_counter:
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
......
...@@ -199,6 +199,8 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, ...@@ -199,6 +199,8 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags); struct flow_cls_offload *f, unsigned long flags);
int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
struct flow_offload_action *fl_act);
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *f); struct tc_cls_matchall_offload *f);
......
...@@ -504,6 +504,16 @@ void mlx5_fc_query_cached(struct mlx5_fc *counter, ...@@ -504,6 +504,16 @@ void mlx5_fc_query_cached(struct mlx5_fc *counter,
counter->lastpackets = c.packets; counter->lastpackets = c.packets;
} }
/* Read the cached counter values without consuming them.
 *
 * Unlike mlx5_fc_query_cached(), this does not update the counter's
 * "last" bookkeeping, so callers that maintain their own deltas (e.g.
 * the TC per-action stats layer) can query the same counter
 * independently.
 */
void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
			      u64 *bytes, u64 *packets, u64 *lastuse)
{
	/* Take a single struct snapshot of the cache before reading
	 * the individual fields.
	 */
	struct mlx5_fc_cache snapshot = counter->cache;

	*packets = snapshot.packets;
	*bytes = snapshot.bytes;
	*lastuse = snapshot.lastuse;
}
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev, void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
struct delayed_work *dwork, struct delayed_work *dwork,
unsigned long delay) unsigned long delay)
......
...@@ -296,6 +296,8 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); ...@@ -296,6 +296,8 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter); u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter, void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse); u64 *bytes, u64 *packets, u64 *lastuse);
void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes); u64 *packets, u64 *bytes);
u32 mlx5_fc_id(struct mlx5_fc *counter); u32 mlx5_fc_id(struct mlx5_fc *counter);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment