Commit f9d4b0c1 authored by Roopa Prabhu, committed by David S. Miller

fib_rules: move common handling of newrule delrule msgs into fib_nl2rule

This reduces code duplication in the fib rule add and del paths and gets
rid of validate_rulemsg. The duplication became obvious when adding
extack support to the fib newrule/delrule error paths.
Signed-off-by: Roopa Prabhu <roopa@cumulusnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a56e6bcd
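
For orientation, a minimal standalone C sketch of the pattern this commit applies: one shared parse/validate helper used by both the add and delete handlers, so error reporting (for example extack messages) only needs to live in one place. The names here (toy_rule, parse_rule_msg, rule_add, rule_del) are hypothetical and are not the kernel APIs shown in the diff below.

/* Hypothetical, self-contained illustration of the pattern: a single
 * helper validates the request and builds the rule object for both the
 * add and delete paths, so the error message is emitted in one place.
 * This is a sketch of the idea only, not kernel code.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_rule {
        unsigned int pref;
        char table[16];
};

/* shared "nl2rule"-style helper */
static int parse_rule_msg(const char *table, unsigned int pref,
                          struct toy_rule **out, const char **errmsg)
{
        struct toy_rule *r;

        if (!table || !table[0]) {
                *errmsg = "table is required";  /* single copy of the message */
                return -EINVAL;
        }

        r = calloc(1, sizeof(*r));
        if (!r)
                return -ENOMEM;

        r->pref = pref;
        snprintf(r->table, sizeof(r->table), "%s", table);
        *out = r;
        return 0;
}

static int rule_add(const char *table, unsigned int pref)
{
        const char *errmsg = NULL;
        struct toy_rule *r;
        int err = parse_rule_msg(table, pref, &r, &errmsg);

        if (err) {
                fprintf(stderr, "add failed: %s\n",
                        errmsg ? errmsg : "out of memory");
                return err;
        }
        printf("add rule pref %u table %s\n", r->pref, r->table);
        free(r);
        return 0;
}

static int rule_del(const char *table, unsigned int pref)
{
        const char *errmsg = NULL;
        struct toy_rule *r;
        int err = parse_rule_msg(table, pref, &r, &errmsg);

        if (err) {
                fprintf(stderr, "del failed: %s\n",
                        errmsg ? errmsg : "out of memory");
                return err;
        }
        printf("del rule pref %u table %s\n", r->pref, r->table);
        free(r);
        return 0;
}

int main(void)
{
        rule_add("main", 100);
        rule_del("", 100);      /* exercises the shared validation error path */
        return 0;
}
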
@@ -387,206 +387,185 @@ unsigned int fib_rules_seq_read(struct net *net, int family)
 }
 EXPORT_SYMBOL_GPL(fib_rules_seq_read);
 
-static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
-                            struct fib_rules_ops *ops)
-{
-        int err = -EINVAL;
-
-        if (frh->src_len)
-                if (tb[FRA_SRC] == NULL ||
-                    frh->src_len > (ops->addr_size * 8) ||
-                    nla_len(tb[FRA_SRC]) != ops->addr_size)
-                        goto errout;
-
-        if (frh->dst_len)
-                if (tb[FRA_DST] == NULL ||
-                    frh->dst_len > (ops->addr_size * 8) ||
-                    nla_len(tb[FRA_DST]) != ops->addr_size)
-                        goto errout;
-
-        err = 0;
-errout:
-        return err;
-}
-
-static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
-                       struct nlattr **tb, struct fib_rule *rule)
+static struct fib_rule *rule_find(struct fib_rules_ops *ops,
+                                  struct fib_rule_hdr *frh,
+                                  struct nlattr **tb,
+                                  struct fib_rule *rule,
+                                  bool user_priority)
 {
         struct fib_rule *r;
 
         list_for_each_entry(r, &ops->rules_list, list) {
-                if (r->action != rule->action)
+                if (rule->action && r->action != rule->action)
                         continue;
 
-                if (r->table != rule->table)
+                if (rule->table && r->table != rule->table)
                         continue;
 
-                if (r->pref != rule->pref)
+                if (user_priority && r->pref != rule->pref)
                         continue;
 
-                if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
+                if (rule->iifname[0] &&
+                    memcmp(r->iifname, rule->iifname, IFNAMSIZ))
                         continue;
 
-                if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
+                if (rule->oifname[0] &&
+                    memcmp(r->oifname, rule->oifname, IFNAMSIZ))
                         continue;
 
-                if (r->mark != rule->mark)
+                if (rule->mark && r->mark != rule->mark)
                         continue;
 
-                if (r->mark_mask != rule->mark_mask)
+                if (rule->mark_mask && r->mark_mask != rule->mark_mask)
                         continue;
 
-                if (r->tun_id != rule->tun_id)
+                if (rule->tun_id && r->tun_id != rule->tun_id)
                         continue;
 
                 if (r->fr_net != rule->fr_net)
                         continue;
 
-                if (r->l3mdev != rule->l3mdev)
+                if (rule->l3mdev && r->l3mdev != rule->l3mdev)
                         continue;
 
-                if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
-                    !uid_eq(r->uid_range.end, rule->uid_range.end))
+                if (uid_range_set(&rule->uid_range) &&
+                    (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
+                     !uid_eq(r->uid_range.end, rule->uid_range.end)))
                         continue;
 
-                if (r->ip_proto != rule->ip_proto)
+                if (rule->ip_proto && r->ip_proto != rule->ip_proto)
                         continue;
 
-                if (!fib_rule_port_range_compare(&r->sport_range,
+                if (fib_rule_port_range_set(&rule->sport_range) &&
+                    !fib_rule_port_range_compare(&r->sport_range,
                                                  &rule->sport_range))
                         continue;
 
-                if (!fib_rule_port_range_compare(&r->dport_range,
+                if (fib_rule_port_range_set(&rule->dport_range) &&
+                    !fib_rule_port_range_compare(&r->dport_range,
                                                  &rule->dport_range))
                         continue;
 
                 if (!ops->compare(r, frh, tb))
                         continue;
-                return 1;
+                return r;
         }
-        return 0;
+
+        return NULL;
 }
 
-int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
-                   struct netlink_ext_ack *extack)
+static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh,
+                       struct netlink_ext_ack *extack,
+                       struct fib_rules_ops *ops,
+                       struct nlattr *tb[],
+                       struct fib_rule **rule,
+                       bool *user_priority)
 {
         struct net *net = sock_net(skb->sk);
         struct fib_rule_hdr *frh = nlmsg_data(nlh);
-        struct fib_rules_ops *ops = NULL;
-        struct fib_rule *rule, *r, *last = NULL;
-        struct nlattr *tb[FRA_MAX+1];
-        int err = -EINVAL, unresolved = 0;
-
-        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
-                goto errout;
-
-        ops = lookup_rules_ops(net, frh->family);
-        if (ops == NULL) {
-                err = -EAFNOSUPPORT;
-                goto errout;
-        }
-
-        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
-        if (err < 0)
-                goto errout;
-
-        err = validate_rulemsg(frh, tb, ops);
-        if (err < 0)
-                goto errout;
+        struct fib_rule *nlrule = NULL;
+        int err = -EINVAL;
+
+        if (frh->src_len)
+                if (!tb[FRA_SRC] ||
+                    frh->src_len > (ops->addr_size * 8) ||
+                    nla_len(tb[FRA_SRC]) != ops->addr_size)
+                        goto errout;
+
+        if (frh->dst_len)
+                if (!tb[FRA_DST] ||
+                    frh->dst_len > (ops->addr_size * 8) ||
+                    nla_len(tb[FRA_DST]) != ops->addr_size)
+                        goto errout;
 
-        rule = kzalloc(ops->rule_size, GFP_KERNEL);
-        if (rule == NULL) {
+        nlrule = kzalloc(ops->rule_size, GFP_KERNEL);
+        if (!nlrule) {
                 err = -ENOMEM;
                 goto errout;
         }
-        refcount_set(&rule->refcnt, 1);
-        rule->fr_net = net;
+        refcount_set(&nlrule->refcnt, 1);
+        nlrule->fr_net = net;
 
-        rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
-                                      : fib_default_rule_pref(ops);
+        if (tb[FRA_PRIORITY]) {
+                nlrule->pref = nla_get_u32(tb[FRA_PRIORITY]);
+                *user_priority = true;
+        } else {
+                nlrule->pref = fib_default_rule_pref(ops);
+        }
 
-        rule->proto = tb[FRA_PROTOCOL] ?
+        nlrule->proto = tb[FRA_PROTOCOL] ?
                 nla_get_u8(tb[FRA_PROTOCOL]) : RTPROT_UNSPEC;
 
         if (tb[FRA_IIFNAME]) {
                 struct net_device *dev;
 
-                rule->iifindex = -1;
-                nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
-                dev = __dev_get_by_name(net, rule->iifname);
+                nlrule->iifindex = -1;
+                nla_strlcpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
+                dev = __dev_get_by_name(net, nlrule->iifname);
                 if (dev)
-                        rule->iifindex = dev->ifindex;
+                        nlrule->iifindex = dev->ifindex;
         }
 
         if (tb[FRA_OIFNAME]) {
                 struct net_device *dev;
 
-                rule->oifindex = -1;
-                nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
-                dev = __dev_get_by_name(net, rule->oifname);
+                nlrule->oifindex = -1;
+                nla_strlcpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
+                dev = __dev_get_by_name(net, nlrule->oifname);
                 if (dev)
-                        rule->oifindex = dev->ifindex;
+                        nlrule->oifindex = dev->ifindex;
         }
 
         if (tb[FRA_FWMARK]) {
-                rule->mark = nla_get_u32(tb[FRA_FWMARK]);
-                if (rule->mark)
+                nlrule->mark = nla_get_u32(tb[FRA_FWMARK]);
+                if (nlrule->mark)
                         /* compatibility: if the mark value is non-zero all bits
                          * are compared unless a mask is explicitly specified.
                          */
-                        rule->mark_mask = 0xFFFFFFFF;
+                        nlrule->mark_mask = 0xFFFFFFFF;
         }
 
         if (tb[FRA_FWMASK])
-                rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
+                nlrule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
 
         if (tb[FRA_TUN_ID])
-                rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
+                nlrule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
 
         err = -EINVAL;
         if (tb[FRA_L3MDEV]) {
 #ifdef CONFIG_NET_L3_MASTER_DEV
-                rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
-                if (rule->l3mdev != 1)
+                nlrule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
+                if (nlrule->l3mdev != 1)
 #endif
                         goto errout_free;
         }
 
-        rule->action = frh->action;
-        rule->flags = frh->flags;
-        rule->table = frh_get_table(frh, tb);
+        nlrule->action = frh->action;
+        nlrule->flags = frh->flags;
+        nlrule->table = frh_get_table(frh, tb);
         if (tb[FRA_SUPPRESS_PREFIXLEN])
-                rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
+                nlrule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
         else
-                rule->suppress_prefixlen = -1;
+                nlrule->suppress_prefixlen = -1;
 
         if (tb[FRA_SUPPRESS_IFGROUP])
-                rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
+                nlrule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
         else
-                rule->suppress_ifgroup = -1;
+                nlrule->suppress_ifgroup = -1;
 
         if (tb[FRA_GOTO]) {
-                if (rule->action != FR_ACT_GOTO)
+                if (nlrule->action != FR_ACT_GOTO)
                         goto errout_free;
 
-                rule->target = nla_get_u32(tb[FRA_GOTO]);
+                nlrule->target = nla_get_u32(tb[FRA_GOTO]);
                 /* Backward jumps are prohibited to avoid endless loops */
-                if (rule->target <= rule->pref)
+                if (nlrule->target <= nlrule->pref)
                         goto errout_free;
-
-                list_for_each_entry(r, &ops->rules_list, list) {
-                        if (r->pref == rule->target) {
-                                RCU_INIT_POINTER(rule->ctarget, r);
-                                break;
-                        }
-                }
-
-                if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
-                        unresolved = 1;
-        } else if (rule->action == FR_ACT_GOTO)
+        } else if (nlrule->action == FR_ACT_GOTO) {
                 goto errout_free;
+        }
 
-        if (rule->l3mdev && rule->table)
+        if (nlrule->l3mdev && nlrule->table)
                 goto errout_free;
 
         if (tb[FRA_UID_RANGE]) {
@@ -595,34 +574,72 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                         goto errout_free;
                 }
 
-                rule->uid_range = nla_get_kuid_range(tb);
-                if (!uid_range_set(&rule->uid_range) ||
-                    !uid_lte(rule->uid_range.start, rule->uid_range.end))
+                nlrule->uid_range = nla_get_kuid_range(tb);
+                if (!uid_range_set(&nlrule->uid_range) ||
+                    !uid_lte(nlrule->uid_range.start, nlrule->uid_range.end))
                         goto errout_free;
         } else {
-                rule->uid_range = fib_kuid_range_unset;
+                nlrule->uid_range = fib_kuid_range_unset;
         }
 
         if (tb[FRA_IP_PROTO])
-                rule->ip_proto = nla_get_u8(tb[FRA_IP_PROTO]);
+                nlrule->ip_proto = nla_get_u8(tb[FRA_IP_PROTO]);
 
         if (tb[FRA_SPORT_RANGE]) {
                 err = nla_get_port_range(tb[FRA_SPORT_RANGE],
-                                         &rule->sport_range);
+                                         &nlrule->sport_range);
                 if (err)
                         goto errout_free;
         }
 
         if (tb[FRA_DPORT_RANGE]) {
                 err = nla_get_port_range(tb[FRA_DPORT_RANGE],
-                                         &rule->dport_range);
+                                         &nlrule->dport_range);
                 if (err)
                         goto errout_free;
         }
 
+        *rule = nlrule;
+        return 0;
+
+errout_free:
+        kfree(nlrule);
+errout:
+        return err;
+}
+
+int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
+                   struct netlink_ext_ack *extack)
+{
+        struct net *net = sock_net(skb->sk);
+        struct fib_rule_hdr *frh = nlmsg_data(nlh);
+        struct fib_rules_ops *ops = NULL;
+        struct fib_rule *rule = NULL, *r, *last = NULL;
+        struct nlattr *tb[FRA_MAX + 1];
+        int err = -EINVAL, unresolved = 0;
+        bool user_priority = false;
+
+        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
+                goto errout;
+
+        ops = lookup_rules_ops(net, frh->family);
+        if (!ops) {
+                err = -EAFNOSUPPORT;
+                goto errout;
+        }
+
+        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
+        if (err < 0)
+                goto errout;
+
+        err = fib_nl2rule(skb, nlh, extack, ops, tb, &rule, &user_priority);
+        if (err)
+                goto errout;
+
         if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
-            rule_exists(ops, frh, tb, rule)) {
+            rule_find(ops, frh, tb, rule, user_priority)) {
                 err = -EEXIST;
                 goto errout_free;
         }
@@ -636,6 +653,16 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
         if (err < 0)
                 goto errout_free;
 
+        list_for_each_entry(r, &ops->rules_list, list) {
+                if (r->pref == rule->target) {
+                        RCU_INIT_POINTER(rule->ctarget, r);
+                        break;
+                }
+        }
+
+        if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
+                unresolved = 1;
+
         list_for_each_entry(r, &ops->rules_list, list) {
                 if (r->pref > rule->pref)
                         break;
@@ -690,13 +717,11 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 {
         struct net *net = sock_net(skb->sk);
         struct fib_rule_hdr *frh = nlmsg_data(nlh);
-        struct fib_rule_port_range sprange = {0, 0};
-        struct fib_rule_port_range dprange = {0, 0};
         struct fib_rules_ops *ops = NULL;
-        struct fib_rule *rule, *r;
+        struct fib_rule *rule = NULL, *r, *nlrule = NULL;
         struct nlattr *tb[FRA_MAX+1];
-        struct fib_kuid_range range;
         int err = -EINVAL;
+        bool user_priority = false;
 
         if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                 goto errout;
@@ -711,150 +736,73 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
         if (err < 0)
                 goto errout;
 
-        err = validate_rulemsg(frh, tb, ops);
-        if (err < 0)
+        err = fib_nl2rule(skb, nlh, extack, ops, tb, &nlrule, &user_priority);
+        if (err)
                 goto errout;
 
-        if (tb[FRA_UID_RANGE]) {
-                range = nla_get_kuid_range(tb);
-                if (!uid_range_set(&range)) {
-                        err = -EINVAL;
-                        goto errout;
-                }
-        } else {
-                range = fib_kuid_range_unset;
+        rule = rule_find(ops, frh, tb, nlrule, user_priority);
+        if (!rule) {
+                err = -ENOENT;
+                goto errout;
         }
 
-        if (tb[FRA_SPORT_RANGE]) {
-                err = nla_get_port_range(tb[FRA_SPORT_RANGE],
-                                         &sprange);
-                if (err)
-                        goto errout;
+        if (rule->flags & FIB_RULE_PERMANENT) {
+                err = -EPERM;
+                goto errout;
         }
 
-        if (tb[FRA_DPORT_RANGE]) {
-                err = nla_get_port_range(tb[FRA_DPORT_RANGE],
-                                         &dprange);
+        if (ops->delete) {
+                err = ops->delete(rule);
                 if (err)
                         goto errout;
         }
 
-        list_for_each_entry(rule, &ops->rules_list, list) {
-                if (tb[FRA_PROTOCOL] &&
-                    (rule->proto != nla_get_u8(tb[FRA_PROTOCOL])))
-                        continue;
-
-                if (frh->action && (frh->action != rule->action))
-                        continue;
-
-                if (frh_get_table(frh, tb) &&
-                    (frh_get_table(frh, tb) != rule->table))
-                        continue;
-
-                if (tb[FRA_PRIORITY] &&
-                    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
-                        continue;
-
-                if (tb[FRA_IIFNAME] &&
-                    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
-                        continue;
-
-                if (tb[FRA_OIFNAME] &&
-                    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
-                        continue;
-
-                if (tb[FRA_FWMARK] &&
-                    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
-                        continue;
-
-                if (tb[FRA_FWMASK] &&
-                    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
-                        continue;
-
-                if (tb[FRA_TUN_ID] &&
-                    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
-                        continue;
-
-                if (tb[FRA_L3MDEV] &&
-                    (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV])))
-                        continue;
-
-                if (uid_range_set(&range) &&
-                    (!uid_eq(rule->uid_range.start, range.start) ||
-                     !uid_eq(rule->uid_range.end, range.end)))
-                        continue;
-
-                if (tb[FRA_IP_PROTO] &&
-                    (rule->ip_proto != nla_get_u8(tb[FRA_IP_PROTO])))
-                        continue;
-
-                if (fib_rule_port_range_set(&sprange) &&
-                    !fib_rule_port_range_compare(&rule->sport_range, &sprange))
-                        continue;
-
-                if (fib_rule_port_range_set(&dprange) &&
-                    !fib_rule_port_range_compare(&rule->dport_range, &dprange))
-                        continue;
-
-                if (!ops->compare(rule, frh, tb))
-                        continue;
-
-                if (rule->flags & FIB_RULE_PERMANENT) {
-                        err = -EPERM;
-                        goto errout;
-                }
-
-                if (ops->delete) {
-                        err = ops->delete(rule);
-                        if (err)
-                                goto errout;
-                }
-
-                if (rule->tun_id)
-                        ip_tunnel_unneed_metadata();
+        if (rule->tun_id)
+                ip_tunnel_unneed_metadata();
 
         list_del_rcu(&rule->list);
 
         if (rule->action == FR_ACT_GOTO) {
                 ops->nr_goto_rules--;
                 if (rtnl_dereference(rule->ctarget) == NULL)
                         ops->unresolved_rules--;
         }
 
         /*
          * Check if this rule is a target to any of them. If so,
          * adjust to the next one with the same preference or
          * disable them. As this operation is eventually very
          * expensive, it is only performed if goto rules, except
          * current if it is goto rule, have actually been added.
          */
         if (ops->nr_goto_rules > 0) {
                 struct fib_rule *n;
 
                 n = list_next_entry(rule, list);
                 if (&n->list == &ops->rules_list || n->pref != rule->pref)
                         n = NULL;
                 list_for_each_entry(r, &ops->rules_list, list) {
                         if (rtnl_dereference(r->ctarget) != rule)
                                 continue;
                         rcu_assign_pointer(r->ctarget, n);
                         if (!n)
                                 ops->unresolved_rules++;
                 }
         }
 
-                call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops,
-                                        NULL);
-                notify_rule_change(RTM_DELRULE, rule, ops, nlh,
-                                   NETLINK_CB(skb).portid);
-                fib_rule_put(rule);
-                flush_route_cache(ops);
-                rules_ops_put(ops);
-                return 0;
-        }
-
-        err = -ENOENT;
+        call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops,
+                                NULL);
+        notify_rule_change(RTM_DELRULE, rule, ops, nlh,
+                           NETLINK_CB(skb).portid);
+        fib_rule_put(rule);
+        flush_route_cache(ops);
+        rules_ops_put(ops);
+        kfree(nlrule);
+        return 0;
+
 errout:
+        if (nlrule)
+                kfree(nlrule);
         rules_ops_put(ops);
         return err;
 }