Commit a46e3d5e authored by Jakub Kicinski's avatar Jakub Kicinski

Merge branch 'nfp-flow-independent-tc-action-hardware-offload'

Simon Horman says:

====================
nfp: flow-independent tc action hardware offload

Baowen Zheng says:

Allow nfp NIC to offload tc actions independent of flows.

The motivation for this work is to offload tc actions independent of flows
for nfp NIC. We allow nfp driver to provide hardware offload of OVS
metering feature - which calls for policers that may be used by multiple
flows and whose lifecycle is independent of any flows that use them.

When nfp driver tries to offload a flow table using the independent action,
the driver will search if the action is already offloaded to the hardware.
If not, the flow table offload will fail.

When the nfp NIC succeeds in offloading an action, the user can check
the in_hw_count field when dumping the tc action.

Tc cli command to offload and dump an action:

 # tc actions add action police rate 100mbit burst 10000k index 200 skip_sw

 # tc -s -d actions list action police

 total acts 1

      action order 0:  police 0xc8 rate 100Mbit burst 10000Kb mtu 2Kb action reclassify
      overhead 0b linklayer ethernet
      ref 1 bind 0  installed 142 sec used 0 sec
      Action statistics:
      Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0)
      backlog 0b 0p requeues 0
      skip_sw in_hw in_hw_count 1
      used_hw_stats delayed
====================

Link: https://lore.kernel.org/r/20220223162302.97609-1-simon.horman@corigine.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 7bbb765b 5e98743c
......@@ -922,6 +922,51 @@ nfp_fl_pedit(const struct flow_action_entry *act,
}
}
/* Write a zeroed meter action header into @act_data and return it.
 * The caller guarantees @act_data has room for a struct nfp_fl_meter.
 */
static struct nfp_fl_meter *nfp_fl_meter(char *act_data)
{
	struct nfp_fl_meter *meter_act = (struct nfp_fl_meter *)act_data;

	memset(meter_act, 0, sizeof(*meter_act));
	meter_act->head.jump_id = NFP_FL_ACTION_OPCODE_METER;
	meter_act->head.len_lw = sizeof(*meter_act) >> NFP_FL_LW_SIZ;

	return meter_act;
}
/* Append a hardware meter action referencing a pre-offloaded policer.
 *
 * The police action must already have been offloaded independently
 * (nfp_flower_search_meter_entry() finds it by tc hw_index); otherwise
 * the flow cannot use it and the offload is rejected.
 *
 * Return: 0 on success, -EOPNOTSUPP if the action list is full or the
 * referenced meter is not offloaded.
 */
static int
nfp_flower_meter_action(struct nfp_app *app,
			const struct flow_action_entry *action,
			struct nfp_fl_payload *nfp_fl, int *a_len,
			struct net_device *netdev,
			struct netlink_ext_ack *extack)
{
	struct nfp_fl_meter *fl_meter;
	u32 meter_id;

	if (*a_len + sizeof(struct nfp_fl_meter) > NFP_FL_MAX_A_SIZ) {
		/* Fixed message: add missing space after the colon. */
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: meter action size beyond the allowed maximum");
		return -EOPNOTSUPP;
	}

	meter_id = action->hw_index;
	if (!nfp_flower_search_meter_entry(app, meter_id)) {
		/* Fixed message: extack strings must not carry ".\n". */
		NL_SET_ERR_MSG_MOD(extack,
				   "can not offload flow table with unsupported police action");
		return -EOPNOTSUPP;
	}

	fl_meter = nfp_fl_meter(&nfp_fl->action_data[*a_len]);
	*a_len += sizeof(struct nfp_fl_meter);
	fl_meter->meter_id = cpu_to_be32(meter_id);

	return 0;
}
static int
nfp_flower_output_action(struct nfp_app *app,
const struct flow_action_entry *act,
......@@ -985,6 +1030,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
struct netlink_ext_ack *extack, int act_idx)
{
struct nfp_flower_priv *fl_priv = app->priv;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_set_tun *set_tun;
struct nfp_fl_push_vlan *psh_v;
......@@ -1149,6 +1195,18 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
*pkt_host = true;
break;
case FLOW_ACTION_POLICE:
if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER)) {
NL_SET_ERR_MSG_MOD(extack,
"unsupported offload: unsupported police action in action list");
return -EOPNOTSUPP;
}
err = nfp_flower_meter_action(app, act, nfp_fl, a_len, netdev,
extack);
if (err)
return err;
break;
default:
/* Currently we do not handle any other actions. */
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
......
......@@ -85,6 +85,7 @@
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
#define NFP_FL_ACTION_OPCODE_METER 24
#define NFP_FL_ACTION_OPCODE_PUSH_GENEVE 26
#define NFP_FL_ACTION_OPCODE_NUM 32
......@@ -260,6 +261,12 @@ struct nfp_fl_set_mpls {
__be32 lse;
};
/* Firmware action descriptor referencing a pre-offloaded meter.
 * @head:     common action header, opcode NFP_FL_ACTION_OPCODE_METER
 * @reserved: padding, must be zero
 * @meter_id: big-endian meter instance id (the tc police hw_index)
 */
struct nfp_fl_meter {
struct nfp_fl_act_head head;
__be16 reserved;
__be32 meter_id;
};
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
......
......@@ -12,7 +12,9 @@
#include <linux/rhashtable.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <linux/workqueue.h>
#include <linux/idr.h>
......@@ -48,6 +50,7 @@ struct nfp_app;
#define NFP_FL_FEATS_IPV6_TUN BIT(7)
#define NFP_FL_FEATS_VLAN_QINQ BIT(8)
#define NFP_FL_FEATS_QOS_PPS BIT(9)
#define NFP_FL_FEATS_QOS_METER BIT(10)
#define NFP_FL_FEATS_HOST_ACK BIT(31)
#define NFP_FL_ENABLE_FLOW_MERGE BIT(0)
......@@ -63,7 +66,8 @@ struct nfp_app;
NFP_FL_FEATS_PRE_TUN_RULES | \
NFP_FL_FEATS_IPV6_TUN | \
NFP_FL_FEATS_VLAN_QINQ | \
NFP_FL_FEATS_QOS_PPS)
NFP_FL_FEATS_QOS_PPS | \
NFP_FL_FEATS_QOS_METER)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
......@@ -191,6 +195,8 @@ struct nfp_fl_internal_ports {
* @qos_stats_work: Workqueue for qos stats processing
* @qos_rate_limiters: Current active qos rate limiters
* @qos_stats_lock: Lock on qos stats updates
* @meter_stats_lock: Lock on meter stats updates
* @meter_table: Hash table used to store the meter table
* @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
* @merge_table: Hash table to store merged flows
* @ct_zone_table: Hash table used to store the different zones
......@@ -228,6 +234,8 @@ struct nfp_flower_priv {
struct delayed_work qos_stats_work;
unsigned int qos_rate_limiters;
spinlock_t qos_stats_lock; /* Protect the qos stats */
struct mutex meter_stats_lock; /* Protect the meter stats */
struct rhashtable meter_table;
int pre_tun_rule_cnt;
struct rhashtable merge_table;
struct rhashtable ct_zone_table;
......@@ -374,6 +382,31 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
/* One snapshot of meter counters as accumulated from firmware replies. */
struct nfp_meter_stats_entry {
u64 pkts;
u64 bytes;
u64 drops;
};

/* Driver-side state for one offloaded meter (tc police action).
 * @ht_node:  membership in nfp_flower_priv.meter_table
 * @meter_id: key; the tc action hw_index
 * @bps:      true for bytes-per-second policing, false for packets-per-second
 * @rate:     configured rate in the unit selected by @bps
 * @burst:    configured burst in the unit selected by @bps
 * @used:     jiffies timestamp when the entry was created
 * @stats:    current/previous counter snapshots; @update is the jiffies
 *            timestamp of the last firmware stats reply (0 until the first
 *            reply arrives)
 */
struct nfp_meter_entry {
struct rhash_head ht_node;
u32 meter_id;
bool bps;
u32 rate;
u32 burst;
u64 used;
struct nfp_meter_stats {
u64 update;
struct nfp_meter_stats_entry curr;
struct nfp_meter_stats_entry prev;
} stats;
};

/* Operations accepted by nfp_flower_setup_meter_entry(). */
enum nfp_meter_op {
NFP_METER_ADD,
NFP_METER_DEL,
};
static inline bool
nfp_flower_internal_port_can_offload(struct nfp_app *app,
struct net_device *netdev)
......@@ -569,4 +602,18 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
void
nfp_flower_update_merge_stats(struct nfp_app *app,
struct nfp_fl_payload *sub_flow);
int nfp_setup_tc_act_offload(struct nfp_app *app,
struct flow_offload_action *fl_act);
int nfp_init_meter_table(struct nfp_app *app);
void nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv);
void nfp_act_stats_reply(struct nfp_app *app, void *pmsg);
int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
bool pps, u32 id, u32 rate, u32 burst);
int nfp_flower_setup_meter_entry(struct nfp_app *app,
const struct flow_action_entry *action,
enum nfp_meter_op op,
u32 meter_id);
struct nfp_meter_entry *
nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id);
#endif
......@@ -1861,6 +1861,20 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
return 0;
}
/* Handle tc offload requests that arrive without a target netdev.
 * Only TC_SETUP_ACT (flow-independent action offload) is supported.
 */
static int
nfp_setup_tc_no_dev(struct nfp_app *app, enum tc_setup_type type, void *data)
{
	if (!data)
		return -EOPNOTSUPP;

	if (type == TC_SETUP_ACT)
		return nfp_setup_tc_act_offload(app, data);

	return -EOPNOTSUPP;
}
int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
......@@ -1868,7 +1882,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!netdev)
return -EOPNOTSUPP;
return nfp_setup_tc_no_dev(cb_priv, type, data);
if (!nfp_fl_is_netdev_to_offload(netdev))
return -EOPNOTSUPP;
......
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
......@@ -11,10 +15,14 @@
#define NFP_FL_QOS_UPDATE msecs_to_jiffies(1000)
#define NFP_FL_QOS_PPS BIT(15)
#define NFP_FL_QOS_METER BIT(10)
/* Common head of policer/meter control messages.
 * @flags_opts: option bits, e.g. NFP_FL_QOS_PPS, NFP_FL_QOS_METER
 * @meter_id:   target meter id when NFP_FL_QOS_METER is set
 * @port:       target port id otherwise (union with @meter_id)
 */
struct nfp_police_cfg_head {
__be32 flags_opts;
union {
__be32 meter_id;
__be32 port;
};
};
enum NFP_FL_QOS_TYPES {
......@@ -46,7 +54,15 @@ enum NFP_FL_QOS_TYPES {
* | Committed Information Rate |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* Word[0](FLag options):
* [15] p(pps) 1 for pps ,0 for bps
* [15] p(pps) 1 for pps, 0 for bps
*
* Meter control message
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-------------------------------+-+---+-----+-+---------+-+---+-+
* | Reserved |p| Y |TYPE |E|TSHFV |P| PC|R|
* +-------------------------------+-+---+-----+-+---------+-+---+-+
* | meter ID |
* +-------------------------------+-------------------------------+
*
*/
struct nfp_police_config {
......@@ -67,6 +83,40 @@ struct nfp_police_stats_reply {
__be64 drop_pkts;
};
/* Send one QOS_MOD control message programming a policer/meter.
 *
 * @ingress: true for per-port ingress rate limiting (@id is a port id),
 *           false for a flow-independent meter (@id is a meter id).
 * @pps:     true for packets-per-second, false for bytes-per-second.
 *
 * Return: 0 on success or -ENOMEM if the cmsg could not be allocated.
 */
int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
				  bool pps, u32 id, u32 rate, u32 burst)
{
	struct nfp_police_config *config;
	struct sk_buff *skb;
	u32 flags = 0;

	skb = nfp_flower_cmsg_alloc(app, sizeof(*config),
				    NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(*config));

	if (pps)
		flags |= NFP_FL_QOS_PPS;
	if (!ingress)
		flags |= NFP_FL_QOS_METER;
	config->head.flags_opts = cpu_to_be32(flags);

	if (ingress)
		config->head.port = cpu_to_be32(id);
	else
		config->head.meter_id = cpu_to_be32(id);

	/* Peak and committed buckets are programmed identically. */
	config->bkt_tkn_p = cpu_to_be32(burst);
	config->bkt_tkn_c = cpu_to_be32(burst);
	config->pbs = cpu_to_be32(burst);
	config->cbs = cpu_to_be32(burst);
	config->pir = cpu_to_be32(rate);
	config->cir = cpu_to_be32(rate);

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_matchall_offload *flow,
......@@ -77,14 +127,13 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_priv *fl_priv = app->priv;
struct flow_action_entry *action = NULL;
struct nfp_flower_repr_priv *repr_priv;
struct nfp_police_config *config;
u32 netdev_port_id, i;
struct nfp_repr *repr;
struct sk_buff *skb;
bool pps_support;
u32 bps_num = 0;
u32 pps_num = 0;
u32 burst;
bool pps;
u64 rate;
if (!nfp_netdev_is_nfp_repr(netdev)) {
......@@ -169,23 +218,12 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
}
if (rate != 0) {
skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
if (!skb)
return -ENOMEM;
config = nfp_flower_cmsg_get_data(skb);
memset(config, 0, sizeof(struct nfp_police_config));
pps = false;
if (action->police.rate_pkt_ps > 0)
config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
config->head.port = cpu_to_be32(netdev_port_id);
config->bkt_tkn_p = cpu_to_be32(burst);
config->bkt_tkn_c = cpu_to_be32(burst);
config->pbs = cpu_to_be32(burst);
config->cbs = cpu_to_be32(burst);
config->pir = cpu_to_be32(rate);
config->cir = cpu_to_be32(rate);
nfp_ctrl_tx(repr->app->ctrl, skb);
pps = true;
nfp_flower_offload_one_police(repr->app, true,
pps, netdev_port_id,
rate, burst);
}
}
repr_priv->qos_table.netdev_port_id = netdev_port_id;
......@@ -266,6 +304,9 @@ void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
u32 netdev_port_id;
msg = nfp_flower_cmsg_get_data(skb);
if (be32_to_cpu(msg->head.flags_opts) & NFP_FL_QOS_METER)
return nfp_act_stats_reply(app, msg);
netdev_port_id = be32_to_cpu(msg->head.port);
rcu_read_lock();
netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
......@@ -297,7 +338,7 @@ void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
u32 netdev_port_id)
u32 id, bool ingress)
{
struct nfp_police_cfg_head *head;
struct sk_buff *skb;
......@@ -308,10 +349,15 @@ nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
GFP_ATOMIC);
if (!skb)
return;
head = nfp_flower_cmsg_get_data(skb);
memset(head, 0, sizeof(struct nfp_police_cfg_head));
head->port = cpu_to_be32(netdev_port_id);
if (ingress) {
head->port = cpu_to_be32(id);
} else {
head->flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
head->meter_id = cpu_to_be32(id);
}
nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}
......@@ -341,7 +387,8 @@ nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
if (!netdev_port_id)
continue;
nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
nfp_flower_stats_rlim_request(fl_priv,
netdev_port_id, true);
}
}
......@@ -359,6 +406,8 @@ static void update_stats_cache(struct work_struct *work)
qos_stats_work);
nfp_flower_stats_rlim_request_all(fl_priv);
nfp_flower_stats_meter_request_all(fl_priv);
schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}
......@@ -406,6 +455,9 @@ void nfp_flower_qos_init(struct nfp_app *app)
struct nfp_flower_priv *fl_priv = app->priv;
spin_lock_init(&fl_priv->qos_stats_lock);
mutex_init(&fl_priv->meter_stats_lock);
nfp_init_meter_table(app);
INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
}
......@@ -441,3 +493,333 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
}
/* offload tc action, currently only for tc police */

/* Meter table layout: entries keyed by the 32-bit meter_id field. */
static const struct rhashtable_params stats_meter_table_params = {
.key_offset = offsetof(struct nfp_meter_entry, meter_id),
.head_offset = offsetof(struct nfp_meter_entry, ht_node),
.key_len = sizeof(u32),
};
/* Look up the meter entry for @meter_id; NULL when not offloaded. */
struct nfp_meter_entry *
nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	return rhashtable_lookup_fast(&fl_priv->meter_table, &meter_id,
				      stats_meter_table_params);
}
/* Get-or-create the meter entry for @meter_id.
 *
 * Returns the existing entry when one is already present; otherwise
 * allocates, inserts and returns a fresh entry. Kicks off the periodic
 * qos stats work when this becomes the first active rate limiter.
 * Returns NULL on allocation or insertion failure.
 */
static struct nfp_meter_entry *
nfp_flower_add_meter_entry(struct nfp_app *app, u32 meter_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_meter_entry *entry;

	entry = rhashtable_lookup_fast(&priv->meter_table, &meter_id,
				       stats_meter_table_params);
	if (entry)
		return entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->meter_id = meter_id;
	entry->used = jiffies;
	if (rhashtable_insert_fast(&priv->meter_table, &entry->ht_node,
				   stats_meter_table_params)) {
		kfree(entry);
		return NULL;
	}

	if (++priv->qos_rate_limiters == 1)
		schedule_delayed_work(&priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return entry;
}
/* Remove and free the meter entry for @meter_id, if present.
 * Stops the periodic qos stats work once no rate limiters remain.
 */
static void nfp_flower_del_meter_entry(struct nfp_app *app, u32 meter_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_meter_entry *entry;

	entry = rhashtable_lookup_fast(&priv->meter_table, &meter_id,
				       stats_meter_table_params);
	if (!entry)
		return;

	rhashtable_remove_fast(&priv->meter_table, &entry->ht_node,
			       stats_meter_table_params);
	kfree(entry);

	if (--priv->qos_rate_limiters == 0)
		cancel_delayed_work_sync(&priv->qos_stats_work);
}
/* Add or delete driver-side state for one meter.
 *
 * @action may be NULL for NFP_METER_DEL; for NFP_METER_ADD the police
 * parameters (rate/burst, bps vs pps) are copied into the entry.
 * Serialized by meter_stats_lock.
 *
 * Return: 0 on success, -ENOMEM if the entry could not be created,
 * -EOPNOTSUPP for an unknown @op.
 */
int nfp_flower_setup_meter_entry(struct nfp_app *app,
				 const struct flow_action_entry *action,
				 enum nfp_meter_op op,
				 u32 meter_id)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_meter_entry *entry = NULL;
	int err = 0;

	mutex_lock(&fl_priv->meter_stats_lock);

	if (op == NFP_METER_DEL) {
		nfp_flower_del_meter_entry(app, meter_id);
		goto out;
	} else if (op == NFP_METER_ADD) {
		entry = nfp_flower_add_meter_entry(app, meter_id);
	} else {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!entry) {
		err = -ENOMEM;
		goto out;
	}

	if (action->police.rate_bytes_ps > 0) {
		entry->bps = true;
		entry->rate = action->police.rate_bytes_ps;
		entry->burst = action->police.burst;
	} else {
		entry->bps = false;
		entry->rate = action->police.rate_pkt_ps;
		entry->burst = action->police.burst_pkt;
	}

out:
	mutex_unlock(&fl_priv->meter_stats_lock);
	return err;
}
/* Initialise the per-app meter hash table. Returns rhashtable_init()'s
 * error code (0 on success).
 */
int nfp_init_meter_table(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	return rhashtable_init(&fl_priv->meter_table,
			       &stats_meter_table_params);
}
/* Request fresh hardware stats for every offloaded meter. */
void
nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv)
{
	struct nfp_meter_entry *entry;
	struct rhashtable_iter iter;

	mutex_lock(&fl_priv->meter_stats_lock);
	rhashtable_walk_enter(&fl_priv->meter_table, &iter);
	rhashtable_walk_start(&iter);

	while ((entry = rhashtable_walk_next(&iter))) {
		/* The walk may return error markers (e.g. -EAGAIN); skip them. */
		if (IS_ERR(entry))
			continue;
		nfp_flower_stats_rlim_request(fl_priv, entry->meter_id, false);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
	mutex_unlock(&fl_priv->meter_stats_lock);
}
/* Offload every police entry of a flow_offload_action to the NIC.
 *
 * Entries that are not police actions, or that carry an unsupported
 * rate type, are skipped with an extack message rather than failing
 * the whole request.
 *
 * Return: 0 when at least one policer was installed, -EOPNOTSUPP
 * otherwise.
 */
static int
nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
			struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct flow_action_entry *entries;
	bool installed = false;
	bool pps_support;
	u32 num, idx;

	pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);
	entries = &fl_act->action.entries[0];
	num = fl_act->action.num_entries;

	for (idx = 0; idx < num; idx++) {
		struct flow_action_entry *act = &entries[idx];
		u32 burst, meter_id;
		u64 rate;
		bool pps;

		if (act->id != FLOW_ACTION_POLICE) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload requires police action");
			continue;
		}

		if (act->police.rate_bytes_ps > 0) {
			rate = act->police.rate_bytes_ps;
			burst = act->police.burst;
		} else if (act->police.rate_pkt_ps > 0 && pps_support) {
			rate = act->police.rate_pkt_ps;
			burst = act->police.burst_pkt;
		} else {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: unsupported qos rate limit");
			continue;
		}

		if (!rate)
			continue;

		meter_id = act->hw_index;
		if (nfp_flower_setup_meter_entry(app, act, NFP_METER_ADD,
						 meter_id))
			continue;

		pps = act->police.rate_pkt_ps > 0;
		nfp_flower_offload_one_police(app, false, pps, meter_id,
					      rate, burst);
		installed = true;
	}

	return installed ? 0 : -EOPNOTSUPP;
}
/* Remove one offloaded police action (meter) from the NIC.
 *
 * Sends a QOS_DEL control message for the meter identified by the tc
 * action index and drops the driver-side meter entry.
 *
 * Return: 0 on success, -EOPNOTSUPP if @fl_act is not a police action,
 * -ENOENT if no matching meter entry exists, -ENOMEM on cmsg
 * allocation failure.
 */
static int
nfp_act_remove_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
		       struct netlink_ext_ack *extack)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_police_config *config;
	struct sk_buff *skb;
	u32 meter_id;
	bool pps;

	/* Delete qos associated data for this interface. */
	if (fl_act->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: qos rate limit offload requires police action");
		return -EOPNOTSUPP;
	}

	meter_id = fl_act->index;
	meter_entry = nfp_flower_search_meter_entry(app, meter_id);
	if (!meter_entry) {
		/* Fixed message: grammar and no trailing ".\n" in extack. */
		NL_SET_ERR_MSG_MOD(extack,
				   "no meter entry found for the action index");
		return -ENOENT;
	}
	pps = !meter_entry->bps;

	skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
	config->head.meter_id = cpu_to_be32(meter_id);
	if (pps)
		config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);

	nfp_ctrl_tx(app->ctrl, skb);

	nfp_flower_setup_meter_entry(app, NULL, NFP_METER_DEL, meter_id);

	return 0;
}
/* Handle a meter stats reply control message from the firmware.
 *
 * Updates the cached current counters for the meter named in the
 * message. On the very first reply (stats.update still 0) the previous
 * snapshot is primed with the current values so the first dump does
 * not report the full lifetime counts as a delta.
 */
void
nfp_act_stats_reply(struct nfp_app *app, void *pmsg)
{
	struct nfp_police_stats_reply *msg = pmsg;
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_meter_entry *entry;
	u32 mid;

	mid = be32_to_cpu(msg->head.meter_id);

	mutex_lock(&fl_priv->meter_stats_lock);
	entry = nfp_flower_search_meter_entry(app, mid);
	if (!entry)
		goto unlock;

	/* "pkts"/"bytes" count everything seen, dropped included. */
	entry->stats.curr.pkts = be64_to_cpu(msg->pass_pkts) +
				 be64_to_cpu(msg->drop_pkts);
	entry->stats.curr.bytes = be64_to_cpu(msg->pass_bytes) +
				  be64_to_cpu(msg->drop_bytes);
	entry->stats.curr.drops = be64_to_cpu(msg->drop_pkts);

	if (!entry->stats.update)
		entry->stats.prev = entry->stats.curr;

	entry->stats.update = jiffies;

unlock:
	mutex_unlock(&fl_priv->meter_stats_lock);
}
static int
nfp_act_stats_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
struct netlink_ext_ack *extack)
{
struct nfp_flower_priv *fl_priv = app->priv;
struct nfp_meter_entry *meter_entry = NULL;
u64 diff_bytes, diff_pkts, diff_drops;
int err = 0;
if (fl_act->id != FLOW_ACTION_POLICE) {
NL_SET_ERR_MSG_MOD(extack,
"unsupported offload: qos rate limit offload requires police action");
return -EOPNOTSUPP;
}
mutex_lock(&fl_priv->meter_stats_lock);
meter_entry = nfp_flower_search_meter_entry(app, fl_act->index);
if (!meter_entry) {
err = -ENOENT;
goto exit_unlock;
}
diff_pkts = meter_entry->stats.curr.pkts > meter_entry->stats.prev.pkts ?
meter_entry->stats.curr.pkts - meter_entry->stats.prev.pkts : 0;
diff_bytes = meter_entry->stats.curr.bytes > meter_entry->stats.prev.bytes ?
meter_entry->stats.curr.bytes - meter_entry->stats.prev.bytes : 0;
diff_drops = meter_entry->stats.curr.drops > meter_entry->stats.prev.drops ?
meter_entry->stats.curr.drops - meter_entry->stats.prev.drops : 0;
flow_stats_update(&fl_act->stats, diff_bytes, diff_pkts, diff_drops,
meter_entry->stats.update,
FLOW_ACTION_HW_STATS_DELAYED);
meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;
exit_unlock:
mutex_unlock(&fl_priv->meter_stats_lock);
return err;
}
/* Entry point for flow-independent tc action offload (TC_SETUP_ACT).
 *
 * Requires firmware meter support (NFP_FL_FEATS_QOS_METER) and
 * dispatches replace/destroy/stats commands to the matching handler.
 */
int nfp_setup_tc_act_offload(struct nfp_app *app,
			     struct flow_offload_action *fl_act)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct netlink_ext_ack *extack = fl_act->extack;

	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER))
		return -EOPNOTSUPP;

	if (fl_act->command == FLOW_ACT_REPLACE)
		return nfp_act_install_actions(app, fl_act, extack);
	if (fl_act->command == FLOW_ACT_DESTROY)
		return nfp_act_remove_actions(app, fl_act, extack);
	if (fl_act->command == FLOW_ACT_STATS)
		return nfp_act_stats_actions(app, fl_act, extack);

	return -EOPNOTSUPP;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment