Commit 75bab45e authored by Petr Machata, committed by Jakub Kicinski

net: nexthop: Add flag to assert that NHGRP reserved fields are zero

There are many unpatched kernel versions out there that do not initialize
the reserved fields of struct nexthop_grp. The issue with that is that if
those fields were to be used for some end (i.e. stop being reserved), old
kernels would still keep sending random data through the field, and a new
userspace could not rely on the value.

In this patch, use the existing NHA_OP_FLAGS, which is currently inbound
only, to carry flags back to the userspace. Add a flag to indicate that the
reserved fields in struct nexthop_grp are zeroed before dumping. This is
reliant on the actual fix from commit 6d745cd0 ("net: nexthop:
Initialize all fields in dumped nexthops").
Signed-off-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Link: https://patch.msgid.link/21037748d4f9d8ff486151f4c09083bcf12d5df8.1723036486.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 246ef406
...@@ -33,6 +33,9 @@ enum { ...@@ -33,6 +33,9 @@ enum {
#define NHA_OP_FLAG_DUMP_STATS BIT(0) #define NHA_OP_FLAG_DUMP_STATS BIT(0)
#define NHA_OP_FLAG_DUMP_HW_STATS BIT(1) #define NHA_OP_FLAG_DUMP_HW_STATS BIT(1)
/* Response OP_FLAGS. */
#define NHA_OP_FLAG_RESP_GRP_RESVD_0 BIT(31) /* Dump clears resvd fields. */
enum { enum {
NHA_UNSPEC, NHA_UNSPEC,
NHA_ID, /* u32; id for nexthop. id == 0 means auto-assign */ NHA_ID, /* u32; id for nexthop. id == 0 means auto-assign */
......
...@@ -865,7 +865,7 @@ static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh, ...@@ -865,7 +865,7 @@ static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
} }
static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh, static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
u32 op_flags) u32 op_flags, u32 *resp_op_flags)
{ {
struct nh_group *nhg = rtnl_dereference(nh->nh_grp); struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
struct nexthop_grp *p; struct nexthop_grp *p;
...@@ -874,6 +874,8 @@ static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh, ...@@ -874,6 +874,8 @@ static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
u16 group_type = 0; u16 group_type = 0;
int i; int i;
*resp_op_flags |= NHA_OP_FLAG_RESP_GRP_RESVD_0;
if (nhg->hash_threshold) if (nhg->hash_threshold)
group_type = NEXTHOP_GRP_TYPE_MPATH; group_type = NEXTHOP_GRP_TYPE_MPATH;
else if (nhg->resilient) else if (nhg->resilient)
...@@ -934,10 +936,12 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh, ...@@ -934,10 +936,12 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
if (nh->is_group) { if (nh->is_group) {
struct nh_group *nhg = rtnl_dereference(nh->nh_grp); struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
u32 resp_op_flags = 0;
if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB)) if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
goto nla_put_failure; goto nla_put_failure;
if (nla_put_nh_group(skb, nh, op_flags)) if (nla_put_nh_group(skb, nh, op_flags, &resp_op_flags) ||
nla_put_u32(skb, NHA_OP_FLAGS, resp_op_flags))
goto nla_put_failure; goto nla_put_failure;
goto out; goto out;
} }
...@@ -1050,7 +1054,9 @@ static size_t nh_nlmsg_size(struct nexthop *nh) ...@@ -1050,7 +1054,9 @@ static size_t nh_nlmsg_size(struct nexthop *nh)
sz += nla_total_size(4); /* NHA_ID */ sz += nla_total_size(4); /* NHA_ID */
if (nh->is_group) if (nh->is_group)
sz += nh_nlmsg_size_grp(nh); sz += nh_nlmsg_size_grp(nh) +
nla_total_size(4) + /* NHA_OP_FLAGS */
0;
else else
sz += nh_nlmsg_size_single(nh); sz += nh_nlmsg_size_single(nh);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment