Commit 361f7e4a authored by David S. Miller

Merge branch 'pps-policing'

Simon Horman says:

====================
net/sched: act_police: add support for packet-per-second policing

This series enhances the TC policer action implementation to allow a
policer action instance to enforce a rate limit based on packets per
second, configurable using packet-per-second rate and burst parameters.

In the hope of aiding review this is broken up into three patches.

* [PATCH 1/3] flow_offload: add support for packet-per-second policing

  Add support for this feature to the flow_offload API, which is used to
  program flows, including TC rules and their actions, into hardware.

* [PATCH 2/3] flow_offload: reject configuration of packet-per-second policing in offload drivers

  Teach all existing users of the flow_offload API that allow offload of
  policer action instances to reject offload if packet-per-second rate
  limiting is configured: none support it at this time.

* [PATCH 3/3] net/sched: act_police: add support for packet-per-second policing

  With the above groundwork in place, add the new feature to the TC policer
  action itself.

With the above in place, the feature may be used.
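
A rough sketch of the check each offload driver gains in patch 2 (the
function name below is illustrative only; the flow_offload identifiers
are the ones used by the driver hunks further down):

	/* Hedged sketch only: reject pps policing until hardware support
	 * exists.  example_validate_police() is an illustrative name, not
	 * a real driver function.
	 */
	static int example_validate_police(struct flow_action *flow_action,
					   struct netlink_ext_ack *extack)
	{
		const struct flow_action_entry *act;
		int i;

		flow_action_for_each(i, act, flow_action) {
			if (act->id != FLOW_ACTION_POLICE)
				continue;
			if (act->police.rate_pkt_ps) {
				NL_SET_ERR_MSG_MOD(extack,
						   "QoS offload not support packets per second");
				return -EOPNOTSUPP;
			}
			/* existing byte-per-second handling continues here */
		}
		return 0;
	}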

As follow-ups we plan to provide:
* Corresponding updates to iproute2
* Corresponding self tests (which depend on the iproute2 changes)
* Hardware offload support for the NFP driver

Key changes since v2:
* Added patches 1 and 2, which make adding patch 3 safe for existing
  hardware offload of the policer action.
* Re-worked patch 3 so that a TC policer action instance may be configured
  for packet-per-second or byte-per-second rate limiting, but not both.
* Corrected kdoc usage
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4849d9be 2ffe0395
......@@ -322,6 +322,12 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			if (act->police.rate_pkt_ps) {
				NL_SET_ERR_MSG_MOD(extack,
						   "QoS offload not support packets per second");
				goto out;
			}
			rc = sja1105_flower_policer(priv, port, extack, cookie,
						    &key,
						    act->police.rate_bytes_ps,
......
......@@ -48,6 +48,11 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
	flow_action_for_each(i, entry, actions) {
		switch (entry->id) {
		case FLOW_ACTION_POLICE:
			if (entry->police.rate_pkt_ps) {
				NL_SET_ERR_MSG_MOD(extack,
						   "QoS offload not support packets per second");
				return -EOPNOTSUPP;
			}
			/* Convert bytes per second to bits per second */
			if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
				NL_SET_ERR_MSG_MOD(extack,
......@@ -145,7 +150,11 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
	flow_action_for_each(i, entry, &cls->rule->action)
		if (entry->id == FLOW_ACTION_POLICE)
			break;

	if (entry->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload not support packets per second");
		return -EOPNOTSUPP;
	}

	/* Convert from bytes per second to Kbps */
	p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
	p.u.params.channel = pi->tx_chan;
......
......@@ -1221,6 +1221,11 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
	/* Flow meter and max frame size */
	if (entryp) {
		if (entryp->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
			err = -EOPNOTSUPP;
			goto free_sfi;
		}
		if (entryp->police.burst) {
			fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
			if (!fmi) {
......
......@@ -4538,6 +4538,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			if (act->police.rate_pkt_ps) {
				NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
				return -EOPNOTSUPP;
			}
			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;
......
......@@ -190,6 +190,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
				return -EOPNOTSUPP;
			}

			if (act->police.rate_pkt_ps) {
				NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
				return -EOPNOTSUPP;
			}

			/* The kernel might adjust the requested burst size so
			 * that it is not exactly a power of two. Re-adjust it
			 * here since the hardware only supports burst sizes
......
......@@ -220,6 +220,11 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
if (a->police.rate_pkt_ps) {
NL_SET_ERR_MSG_MOD(extack,
"QoS offload not support packets per second");
return -EOPNOTSUPP;
}
filter->action.police_ena = true;
rate = a->police.rate_bytes_ps;
filter->action.pol.rate = div_u64(rate, 1000) * 8;
......
......@@ -251,6 +251,12 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
			return -EEXIST;
		}

		if (action->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack,
					   "QoS offload not support packets per second");
			return -EOPNOTSUPP;
		}

		pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
		pol.burst = action->police.burst;
......
......@@ -104,6 +104,11 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
		return -EOPNOTSUPP;
	}

	if (action->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not support packets per second");
		return -EOPNOTSUPP;
	}

	rate = action->police.rate_bytes_ps;
	burst = action->police.burst;
	netdev_port_id = nfp_repr_get_port_id(netdev);
......
......@@ -234,6 +234,8 @@ struct flow_action_entry {
			u32 index;
			u32 burst;
			u64 rate_bytes_ps;
			u64 burst_pkt;
			u64 rate_pkt_ps;
			u32 mtu;
		} police;
		struct { /* FLOW_ACTION_CT */
......
......@@ -1242,6 +1242,20 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

struct psched_pktrate {
	u64	rate_pkts_ps; /* packets per second */
	u32	mult;
	u8	shift;
};

static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
				  unsigned int pkt_num)
{
	return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
......
......@@ -10,10 +10,13 @@ struct tcf_police_params {
	s64			tcfp_burst;
	u32			tcfp_mtu;
	s64			tcfp_mtu_ptoks;
	s64			tcfp_pkt_burst;
	struct psched_ratecfg	rate;
	bool			rate_present;
	struct psched_ratecfg	peak;
	bool			peak_present;
	struct psched_pktrate	ppsrate;
	bool			pps_present;
	struct rcu_head		rcu;
};
......@@ -24,6 +27,7 @@ struct tcf_police {
	spinlock_t		tcfp_lock ____cacheline_aligned_in_smp;
	s64			tcfp_toks;
	s64			tcfp_ptoks;
	s64			tcfp_pkttoks;
	s64			tcfp_t_c;
};
......@@ -97,6 +101,54 @@ static inline u32 tcf_police_burst(const struct tc_action *act)
	return burst;
}

static inline u64 tcf_police_rate_pkt_ps(const struct tc_action *act)
{
	struct tcf_police *police = to_police(act);
	struct tcf_police_params *params;

	params = rcu_dereference_protected(police->params,
					   lockdep_is_held(&police->tcf_lock));
	return params->ppsrate.rate_pkts_ps;
}

static inline u32 tcf_police_burst_pkt(const struct tc_action *act)
{
	struct tcf_police *police = to_police(act);
	struct tcf_police_params *params;
	u32 burst;

	params = rcu_dereference_protected(police->params,
					   lockdep_is_held(&police->tcf_lock));

	/*
	 * "rate" pkts     "burst" nanoseconds
	 * ------------ *  -------------------
	 *   1 second            2^6 ticks
	 *
	 * ------------------------------------
	 *        NSEC_PER_SEC nanoseconds
	 *        ------------------------
	 *              2^6 ticks
	 *
	 *   "rate" pkts    "burst" nanoseconds        2^6 ticks
	 * = ------------ * ------------------- * ------------------------
	 *     1 second          2^6 ticks       NSEC_PER_SEC nanoseconds
	 *
	 *   "rate" * "burst"
	 * = ----------------  pkts/nanosecond
	 *    NSEC_PER_SEC^2
	 *
	 *
	 *   "rate" * "burst"
	 * = ----------------  pkts/second
	 *     NSEC_PER_SEC
	 */
	burst = div_u64(params->tcfp_pkt_burst * params->ppsrate.rate_pkts_ps,
			NSEC_PER_SEC);

	return burst;
}

static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act)
{
	struct tcf_police *police = to_police(act);
......
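
To make the burst conversion concrete, here is a small standalone userspace
sketch of the same arithmetic. The values are illustrative, and it assumes
that userspace encodes TCA_POLICE_PKTBURST64 as the time, in 64 ns psched
ticks (the "2^6" in the comment above), needed to transmit the requested
burst at the requested packet rate; the planned iproute2 support is expected
to behave this way.

	/* Standalone sketch of the TCA_POLICE_PKTBURST64 unit conversions.
	 * Values are illustrative; PSCHED_SHIFT == 6 (64 ns per tick) is
	 * assumed, matching the "2^6 ticks" in the comment above.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000ULL
	#define PSCHED_SHIFT	6

	int main(void)
	{
		uint64_t rate_pps = 1000;	/* desired packets per second */
		uint64_t burst_pkts = 10;	/* desired burst, in packets */

		/* Userspace: encode the burst as the time needed to send
		 * burst_pkts packets at rate_pps, expressed in psched ticks.
		 */
		uint64_t burst_ns = burst_pkts * NSEC_PER_SEC / rate_pps;
		uint64_t pktburst64 = burst_ns >> PSCHED_SHIFT;

		/* Kernel: tcfp_pkt_burst = PSCHED_TICKS2NS(pktburst64) */
		uint64_t tcfp_pkt_burst = pktburst64 << PSCHED_SHIFT;

		/* tcf_police_burst_pkt(): recover the packet count for dumps */
		uint64_t burst_back = tcfp_pkt_burst * rate_pps / NSEC_PER_SEC;

		printf("PKTBURST64=%llu ticks, tcfp_pkt_burst=%llu ns, burst=%llu pkts\n",
		       (unsigned long long)pktburst64,
		       (unsigned long long)tcfp_pkt_burst,
		       (unsigned long long)burst_back);
		return 0;
	}
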
......@@ -190,6 +190,8 @@ enum {
	TCA_POLICE_PAD,
	TCA_POLICE_RATE64,
	TCA_POLICE_PEAKRATE64,
	TCA_POLICE_PKTRATE64,
	TCA_POLICE_PKTBURST64,
	__TCA_POLICE_MAX
#define TCA_POLICE_RESULT TCA_POLICE_RESULT
};
......
......@@ -42,6 +42,8 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
	[TCA_POLICE_RESULT]	= { .type = NLA_U32 },
	[TCA_POLICE_RATE64]	= { .type = NLA_U64 },
	[TCA_POLICE_PEAKRATE64]	= { .type = NLA_U64 },
	[TCA_POLICE_PKTRATE64]	= { .type = NLA_U64, .min = 1 },
	[TCA_POLICE_PKTBURST64]	= { .type = NLA_U64, .min = 1 },
};
static int tcf_police_init(struct net *net, struct nlattr *nla,
......@@ -61,6 +63,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
	bool exists = false;
	u32 index;
	u64 rate64, prate64;
	u64 pps, ppsburst;

	if (nla == NULL)
		return -EINVAL;
......@@ -142,6 +145,21 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
		}
	}

	if ((tb[TCA_POLICE_PKTRATE64] && !tb[TCA_POLICE_PKTBURST64]) ||
	    (!tb[TCA_POLICE_PKTRATE64] && tb[TCA_POLICE_PKTBURST64])) {
		NL_SET_ERR_MSG(extack,
			       "Both or neither packet-per-second burst and rate must be provided");
		err = -EINVAL;
		goto failure;
	}

	if (tb[TCA_POLICE_PKTRATE64] && R_tab) {
		NL_SET_ERR_MSG(extack,
			       "packet-per-second and byte-per-second rate limits not allowed in same action");
		err = -EINVAL;
		goto failure;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (unlikely(!new)) {
		err = -ENOMEM;
......@@ -183,6 +201,14 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
	if (tb[TCA_POLICE_AVRATE])
		new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);

	if (tb[TCA_POLICE_PKTRATE64]) {
		pps = nla_get_u64(tb[TCA_POLICE_PKTRATE64]);
		ppsburst = nla_get_u64(tb[TCA_POLICE_PKTBURST64]);
		new->pps_present = true;
		new->tcfp_pkt_burst = PSCHED_TICKS2NS(ppsburst);
		psched_ppscfg_precompute(&new->ppsrate, pps);
	}

	spin_lock_bh(&police->tcf_lock);
	spin_lock_bh(&police->tcfp_lock);
	police->tcfp_t_c = ktime_get_ns();
......@@ -217,8 +243,8 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_police *police = to_police(a);
	s64 now, toks, ppstoks = 0, ptoks = 0;
	struct tcf_police_params *p;
	s64 now, toks, ptoks = 0;
	int ret;

	tcf_lastuse_update(&police->tcf_tm);
......@@ -236,7 +262,7 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
	}

	if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
		if (!p->rate_present) {
		if (!p->rate_present && !p->pps_present) {
			ret = p->tcfp_result;
			goto end;
		}
......@@ -251,14 +277,23 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
			ptoks -= (s64)psched_l2t_ns(&p->peak,
						    qdisc_pkt_len(skb));
		}
		if (p->rate_present) {
			toks += police->tcfp_toks;
			if (toks > p->tcfp_burst)
				toks = p->tcfp_burst;
			toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
		if ((toks|ptoks) >= 0) {
		} else if (p->pps_present) {
			ppstoks = min_t(s64, now - police->tcfp_t_c, p->tcfp_pkt_burst);
			ppstoks += police->tcfp_pkttoks;
			if (ppstoks > p->tcfp_pkt_burst)
				ppstoks = p->tcfp_pkt_burst;
			ppstoks -= (s64)psched_pkt2t_ns(&p->ppsrate, 1);
		}

		if ((toks | ptoks | ppstoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			police->tcfp_pkttoks = ppstoks;
			spin_unlock_bh(&police->tcfp_lock);
			ret = p->tcfp_result;
			goto inc_drops;
......@@ -331,6 +366,16 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (p->pps_present) {
		if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64,
				      police->params->ppsrate.rate_pkts_ps,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
		if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64,
				      PSCHED_NS2TICKS(p->tcfp_pkt_burst),
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
		goto nla_put_failure;
	if (p->tcfp_result &&
......
......@@ -3661,6 +3661,9 @@ int tc_setup_flow_action(struct flow_action *flow_action,
			entry->police.burst = tcf_police_burst(act);
			entry->police.rate_bytes_ps =
				tcf_police_rate_bytes_ps(act);
			entry->police.burst_pkt = tcf_police_burst_pkt(act);
			entry->police.rate_pkt_ps =
				tcf_police_rate_pkt_ps(act);
			entry->police.mtu = tcf_police_tcfp_mtu(act);
			entry->police.index = act->tcfa_index;
		} else if (is_tcf_ct(act)) {
......
......@@ -1325,16 +1325,15 @@ void dev_shutdown(struct net_device *dev)
	WARN_ON(timer_pending(&dev->watchdog_timer));
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	r->mult = 1;
	/*
/**
 * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
 * @rate:  Rate to compute reciprocal division values of
 * @mult:  Multiplier for reciprocal division
 * @shift: Shift for reciprocal division
 *
 * The multiplier and shift for reciprocal division by rate are stored
 * in mult and shift.
 *
 * The deal here is to replace a divide by a reciprocal one
 * in fast path (a reciprocal divide is a multiply and a shift)
 *
......@@ -1346,21 +1345,47 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
 *
 * We try to get the highest possible mult value for accuracy,
 * but have to make sure no overflows will ever happen.
 *
 * reciprocal_value() is not used here it doesn't handle 64-bit values.
 */
	if (r->rate_bytes_ps > 0) {
static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
{
	u64 factor = NSEC_PER_SEC;

	*mult = 1;
	*shift = 0;

	if (rate <= 0)
		return;

	for (;;) {
		r->mult = div64_u64(factor, r->rate_bytes_ps);
		if (r->mult & (1U << 31) || factor & (1ULL << 63))
		*mult = div64_u64(factor, rate);
		if (*mult & (1U << 31) || factor & (1ULL << 63))
			break;
		factor <<= 1;
		r->shift++;
	}
		(*shift)++;
	}
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);

	psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
}
EXPORT_SYMBOL(psched_ratecfg_precompute);

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
{
	r->rate_pkts_ps = pktrate64;
	psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
}
EXPORT_SYMBOL(psched_ppscfg_precompute);
static void mini_qdisc_rcu_func(struct rcu_head *head)
{
}
......
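
For reference, a standalone userspace sketch of the reciprocal division that
psched_ratecfg_precompute__() sets up and psched_pkt2t_ns() consumes; the
1000 packets-per-second rate is an arbitrary example value:

	/* Standalone sketch of psched_ratecfg_precompute__() and
	 * psched_pkt2t_ns(): mult/shift replace a 64-bit divide on the
	 * fast path.  The rate below is an arbitrary example.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000ULL

	static void precompute(uint64_t rate, uint32_t *mult, uint8_t *shift)
	{
		uint64_t factor = NSEC_PER_SEC;

		*mult = 1;
		*shift = 0;
		if (rate == 0)
			return;
		for (;;) {
			*mult = (uint32_t)(factor / rate);
			if ((*mult & (1U << 31)) || (factor & (1ULL << 63)))
				break;
			factor <<= 1;
			(*shift)++;
		}
	}

	/* Equivalent of psched_pkt2t_ns(): ns "cost" of pkt_num packets */
	static uint64_t pkt2t_ns(uint32_t mult, uint8_t shift, unsigned int pkt_num)
	{
		return ((uint64_t)pkt_num * mult) >> shift;
	}

	int main(void)
	{
		uint32_t mult;
		uint8_t shift;

		precompute(1000, &mult, &shift);	/* 1000 packets per second */
		/* One packet should cost about NSEC_PER_SEC / 1000 = 1000000 ns */
		printf("mult=%u shift=%u one_pkt=%llu ns\n",
		       mult, (unsigned)shift,
		       (unsigned long long)pkt2t_ns(mult, shift, 1));
		return 0;
	}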