Commit 72111015 authored by Jakub Kicinski's avatar Jakub Kicinski Committed by David S. Miller

net: sched: gred: allow manipulating per-DP RED flags

Allow users to set and dump RED flags (ECN enabled and harddrop)
on per-virtual queue basis.  Validation of attributes is split
from changes to make sure we won't have to undo previous operations
when we find out configuration is invalid.

The objective is to allow changing per-Qdisc parameters without
overwriting the per-vq configured flags.

Old user space will not pass the TCA_GRED_VQ_FLAGS attribute and
per-Qdisc flags will always get propagated to the virtual queues.

New user space which wants to make use of per-vq flags should set
per-Qdisc flags to 0 and then configure per-vq flags as it
sees fit.  Once per-vq flags are set per-Qdisc flags can't be
changed to non-zero.  Vice versa - if the per-Qdisc flags are
non-zero the TCA_GRED_VQ_FLAGS attribute has to either be omitted
or set to the same value as per-Qdisc flags.

Update per-Qdisc parameters:
per-Qdisc | per-VQ | result
        0 |      0 | all vq flags updated
        0 |  non-0 | error (vq flags in use)
    non-0 |      0 | -- impossible --
    non-0 |  non-0 | all vq flags updated

Update per-VQ state (flags parameter not specified):
   no change to flags

Update per-VQ state (flags parameter set):
per-Qdisc | per-VQ | result
        0 |   any  | per-vq flags updated
    non-0 |      0 | -- impossible --
    non-0 |  non-0 | error (per-Qdisc flags in use)
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 25fc1989
...@@ -317,6 +317,7 @@ enum { ...@@ -317,6 +317,7 @@ enum {
TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */ TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
TCA_GRED_VQ_STAT_PDROP, /* u32 */ TCA_GRED_VQ_STAT_PDROP, /* u32 */
TCA_GRED_VQ_STAT_OTHER, /* u32 */ TCA_GRED_VQ_STAT_OTHER, /* u32 */
TCA_GRED_VQ_FLAGS, /* u32 */
__TCA_GRED_VQ_MAX __TCA_GRED_VQ_MAX
}; };
......
...@@ -152,6 +152,19 @@ static int gred_use_harddrop(struct gred_sched_data *q) ...@@ -152,6 +152,19 @@ static int gred_use_harddrop(struct gred_sched_data *q)
return q->red_flags & TC_RED_HARDDROP; return q->red_flags & TC_RED_HARDDROP;
} }
static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
unsigned int i;
/* Local per-vq flags couldn't have been set unless global are 0 */
if (table->red_flags)
return false;
for (i = 0; i < MAX_DPs; i++)
if (table->tab[i] && table->tab[i]->red_flags)
return true;
return false;
}
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free) struct sk_buff **to_free)
{ {
...@@ -329,6 +342,10 @@ static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps, ...@@ -329,6 +342,10 @@ static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count"); NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
return -EINVAL; return -EINVAL;
} }
if (sopt->flags && gred_per_vq_red_flags_used(table)) {
NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
return -EINVAL;
}
sch_tree_lock(sch); sch_tree_lock(sch);
table->DPs = sopt->DPs; table->DPs = sopt->DPs;
...@@ -410,15 +427,127 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp, ...@@ -410,15 +427,127 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
return 0; return 0;
} }
/* Policy for the attributes nested inside one TCA_GRED_VQ_ENTRY:
 * the virtual queue index and its per-vq RED flags, both u32.
 */
static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
[TCA_GRED_VQ_DP] = { .type = NLA_U32 },
[TCA_GRED_VQ_FLAGS] = { .type = NLA_U32 },
};
/* Policy for TCA_GRED_VQ_LIST: the list may only contain nested
 * TCA_GRED_VQ_ENTRY attributes.
 */
static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
[TCA_GRED_VQ_ENTRY] = { .type = NLA_NESTED },
};
static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = { static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
[TCA_GRED_PARMS] = { .len = sizeof(struct tc_gred_qopt) }, [TCA_GRED_PARMS] = { .len = sizeof(struct tc_gred_qopt) },
[TCA_GRED_STAB] = { .len = 256 }, [TCA_GRED_STAB] = { .len = 256 },
[TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) }, [TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) },
[TCA_GRED_MAX_P] = { .type = NLA_U32 }, [TCA_GRED_MAX_P] = { .type = NLA_U32 },
[TCA_GRED_LIMIT] = { .type = NLA_U32 }, [TCA_GRED_LIMIT] = { .type = NLA_U32 },
[TCA_GRED_VQ_LIST] = { .type = NLA_REJECT }, [TCA_GRED_VQ_LIST] = { .type = NLA_NESTED },
}; };
static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
u32 dp;
nla_parse_nested(tb, TCA_GRED_VQ_MAX, entry, gred_vq_policy, NULL);
dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
if (tb[TCA_GRED_VQ_FLAGS])
table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}
static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
const struct nlattr *attr;
int rem;
nla_for_each_nested(attr, vqs, rem) {
switch (nla_type(attr)) {
case TCA_GRED_VQ_ENTRY:
gred_vq_apply(table, attr);
break;
}
}
}
/* Validate one TCA_GRED_VQ_ENTRY against the current table state.
 *
 * @cdp is the index of the virtual queue being (re)configured by the
 * enclosing request; it is allowed to not exist yet, while any other
 * DP referenced must already be instantiated.  Per-vq RED flags may
 * only be changed while the per-Qdisc flags are zero (or equal), and
 * must be a subset of GRED_VQ_RED_FLAGS.
 *
 * Returns 0 on success or a negative errno with @extack set.
 */
static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *attr[TCA_GRED_VQ_MAX + 1];
	int ret;
	u32 vq;

	ret = nla_parse_nested(attr, TCA_GRED_VQ_MAX, entry, gred_vq_policy,
			       extack);
	if (ret < 0)
		return ret;

	/* The DP index is mandatory and must address an existing vq
	 * (or the one the request itself is creating).
	 */
	if (!attr[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	vq = nla_get_u32(attr[TCA_GRED_VQ_DP]);
	if (vq >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (vq != cdp && !table->tab[vq]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (attr[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(attr[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}
/* Validate a whole TCA_GRED_VQ_LIST before anything is applied.
 *
 * Validation is done in full up front so that gred_vqs_apply() can
 * never fail half-way and leave a partially-updated configuration.
 * Returns 0 on success or a negative errno with @extack set.
 */
static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *vq;
	int rem, ret;

	ret = nla_validate_nested(vqs, TCA_GRED_VQ_ENTRY_MAX,
				  gred_vqe_policy, extack);
	if (ret < 0)
		return ret;

	nla_for_each_nested(vq, vqs, rem) {
		if (nla_type(vq) != TCA_GRED_VQ_ENTRY) {
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
		ret = gred_vq_validate(table, cdp, vq, extack);
		if (ret)
			return ret;
	}

	/* Leftover bytes mean the list was malformed. */
	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}
static int gred_change(struct Qdisc *sch, struct nlattr *opt, static int gred_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
...@@ -460,6 +589,13 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt, ...@@ -460,6 +589,13 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
return -EINVAL; return -EINVAL;
} }
if (tb[TCA_GRED_VQ_LIST]) {
err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
extack);
if (err)
return err;
}
if (gred_rio_mode(table)) { if (gred_rio_mode(table)) {
if (ctl->prio == 0) { if (ctl->prio == 0) {
int def_prio = GRED_DEF_PRIO; int def_prio = GRED_DEF_PRIO;
...@@ -483,6 +619,9 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt, ...@@ -483,6 +619,9 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
if (err < 0) if (err < 0)
goto err_unlock_free; goto err_unlock_free;
if (tb[TCA_GRED_VQ_LIST])
gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);
if (gred_rio_mode(table)) { if (gred_rio_mode(table)) {
gred_disable_wred_mode(table); gred_disable_wred_mode(table);
if (gred_wred_mode_check(sch)) if (gred_wred_mode_check(sch))
...@@ -627,6 +766,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -627,6 +766,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP)) if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
goto nla_put_failure; goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
goto nla_put_failure;
/* Stats */ /* Stats */
if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin, if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
TCA_GRED_VQ_PAD)) TCA_GRED_VQ_PAD))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment