Commit 2e422069 authored by David S. Miller

Merge branch 'bridge_flags'

Vlad Yasevich says:

====================
The following series adds two new per-port flags to the bridge.  One flag
lets the user control whether MAC learning is performed on the port;
by default learning is on.
The other flag lets the user control whether unicast traffic with no
fdb entry (unknown unicast) is flooded to a given port.  The default is
also on.

Changes since v4:
 - Implemented Stephen's suggestions.

Changes since v2:
 - removed unused "unlock" tag.

Changes since v1:
 - Integrated suggestion from MST to not impact RTM_NEWNEIGH and to
   skip lookups when learning is disabled.

Vlad Yasevich (2):
  bridge: Add flag to control mac learning.
  bridge: Add a flag to control unicast packet flood.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 30f3a40f 867a5943
@@ -221,6 +221,8 @@ enum {
         IFLA_BRPORT_GUARD,      /* bpdu guard */
         IFLA_BRPORT_PROTECT,    /* root port protection */
         IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */
+        IFLA_BRPORT_LEARNING,   /* mac learning */
+        IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */
         __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -58,10 +58,10 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
         skb_pull(skb, ETH_HLEN);
 
         if (is_broadcast_ether_addr(dest))
-                br_flood_deliver(br, skb);
+                br_flood_deliver(br, skb, false);
         else if (is_multicast_ether_addr(dest)) {
                 if (unlikely(netpoll_tx_running(dev))) {
-                        br_flood_deliver(br, skb);
+                        br_flood_deliver(br, skb, false);
                         goto out;
                 }
                 if (br_multicast_rcv(br, NULL, skb)) {
@@ -73,11 +73,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
                 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
                         br_multicast_deliver(mdst, skb);
                 else
-                        br_flood_deliver(br, skb);
+                        br_flood_deliver(br, skb, false);
         } else if ((dst = __br_fdb_get(br, dest, vid)) != NULL)
                 br_deliver(dst->dst, skb);
         else
-                br_flood_deliver(br, skb);
+                br_flood_deliver(br, skb, true);
 
 out:
         rcu_read_unlock();
@@ -174,7 +174,8 @@ static struct net_bridge_port *maybe_deliver(
 static void br_flood(struct net_bridge *br, struct sk_buff *skb,
                      struct sk_buff *skb0,
                      void (*__packet_hook)(const struct net_bridge_port *p,
-                                           struct sk_buff *skb))
+                                           struct sk_buff *skb),
+                     bool unicast)
 {
         struct net_bridge_port *p;
         struct net_bridge_port *prev;
@@ -182,6 +183,9 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
         prev = NULL;
 
         list_for_each_entry_rcu(p, &br->port_list, list) {
+                /* Do not flood unicast traffic to ports that turn it off */
+                if (unicast && !(p->flags & BR_FLOOD))
+                        continue;
                 prev = maybe_deliver(prev, p, skb, __packet_hook);
                 if (IS_ERR(prev))
                         goto out;
@@ -203,16 +207,16 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
 /* called with rcu_read_lock */
-void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
+void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
 {
-        br_flood(br, skb, NULL, __br_deliver);
+        br_flood(br, skb, NULL, __br_deliver, unicast);
 }
 
 /* called under bridge lock */
 void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
-                      struct sk_buff *skb2)
+                      struct sk_buff *skb2, bool unicast)
 {
-        br_flood(br, skb, skb2, __br_forward);
+        br_flood(br, skb, skb2, __br_forward, unicast);
 }
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
@@ -221,7 +221,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
         p->path_cost = port_cost(dev);
         p->priority = 0x8000 >> BR_PORT_BITS;
         p->port_no = index;
-        p->flags = 0;
+        p->flags = BR_LEARNING | BR_FLOOD;
         br_init_port(p);
         p->state = BR_STATE_DISABLED;
         br_stp_port_timer_init(p);
@@ -65,6 +65,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
         struct net_bridge_fdb_entry *dst;
         struct net_bridge_mdb_entry *mdst;
         struct sk_buff *skb2;
+        bool unicast = true;
         u16 vid = 0;
 
         if (!p || p->state == BR_STATE_DISABLED)
@@ -75,7 +76,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
         /* insert into forwarding database after filtering to avoid spoofing */
         br = p->br;
-        br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
+        if (p->flags & BR_LEARNING)
+                br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
 
         if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
             br_multicast_rcv(br, p, skb))
@@ -94,9 +96,10 @@ int br_handle_frame_finish(struct sk_buff *skb)
         dst = NULL;
 
-        if (is_broadcast_ether_addr(dest))
+        if (is_broadcast_ether_addr(dest)) {
                 skb2 = skb;
-        else if (is_multicast_ether_addr(dest)) {
+                unicast = false;
+        } else if (is_multicast_ether_addr(dest)) {
                 mdst = br_mdb_get(br, skb, vid);
                 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
                         if ((mdst && mdst->mglist) ||
@@ -109,6 +112,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
                 } else
                         skb2 = skb;
 
+                unicast = false;
                 br->dev->stats.multicast++;
         } else if ((dst = __br_fdb_get(br, dest, vid)) &&
                    dst->is_local) {
@@ -122,7 +126,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
                         dst->used = jiffies;
                         br_forward(dst->dst, skb, skb2);
                 } else
-                        br_flood_forward(br, skb, skb2);
+                        br_flood_forward(br, skb, skb2, unicast);
         }
 
         if (skb2)
@@ -142,7 +146,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
         u16 vid = 0;
 
         br_vlan_get_tag(skb, &vid);
-        br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
+        if (p->flags & BR_LEARNING)
+                br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
         return 0;        /* process further */
 }
@@ -30,6 +30,8 @@ static inline size_t br_port_info_size(void)
                 + nla_total_size(1)     /* IFLA_BRPORT_GUARD */
                 + nla_total_size(1)     /* IFLA_BRPORT_PROTECT */
                 + nla_total_size(1)     /* IFLA_BRPORT_FAST_LEAVE */
+                + nla_total_size(1)     /* IFLA_BRPORT_LEARNING */
+                + nla_total_size(1)     /* IFLA_BRPORT_UNICAST_FLOOD */
                 + 0;
 }
@@ -56,7 +58,9 @@ static int br_port_fill_attrs(struct sk_buff *skb,
             nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
             nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
             nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) ||
-            nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)))
+            nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
+            nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
+            nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)))
                 return -EMSGSIZE;
 
         return 0;
@@ -281,6 +285,8 @@ static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
         [IFLA_BRPORT_MODE]      = { .type = NLA_U8 },
         [IFLA_BRPORT_GUARD]     = { .type = NLA_U8 },
         [IFLA_BRPORT_PROTECT]   = { .type = NLA_U8 },
+        [IFLA_BRPORT_LEARNING]  = { .type = NLA_U8 },
+        [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
 };
 
 /* Change the state of the port and notify spanning tree */
@@ -328,6 +334,8 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
         br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
         br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
         br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
+        br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
+        br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
 
         if (tb[IFLA_BRPORT_COST]) {
                 err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
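[Editor's note, not part of the series] The hunks above expose the two flags over netlink as IFLA_BRPORT_LEARNING and IFLA_BRPORT_UNICAST_FLOOD, parsed by br_setport() out of a nested IFLA_PROTINFO.  A rough user-space sketch of clearing both flags on one port follows; the port name "eth0", the use of NLM_F_ACK without reading the reply, and the minimal error handling are placeholders for illustration only.

/* Sketch: clear BR_LEARNING and BR_FLOOD on a bridge port via RTM_SETLINK. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

struct req {
        struct nlmsghdr  nh;
        struct ifinfomsg ifi;
        char             buf[256];
};

/* Append one rtattr to the message (iproute2 addattr_l style). */
static struct rtattr *add_rta(struct nlmsghdr *nh, unsigned short type,
                              const void *data, int len)
{
        struct rtattr *rta = (struct rtattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));

        rta->rta_type = type;
        rta->rta_len = RTA_LENGTH(len);
        if (data)
                memcpy(RTA_DATA(rta), data, len);
        nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
        return rta;
}

int main(void)
{
        struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
        struct req r = { 0 };
        struct rtattr *protinfo;
        unsigned char off = 0;                        /* 0 = clear the flag */
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        r.nh.nlmsg_len = NLMSG_LENGTH(sizeof(r.ifi));
        r.nh.nlmsg_type = RTM_SETLINK;
        r.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
        r.ifi.ifi_family = PF_BRIDGE;                 /* bridge port attributes */
        r.ifi.ifi_index = if_nametoindex("eth0");     /* placeholder port name */

        /* Nest the per-port flags inside IFLA_PROTINFO. */
        protinfo = add_rta(&r.nh, IFLA_PROTINFO | NLA_F_NESTED, NULL, 0);
        add_rta(&r.nh, IFLA_BRPORT_LEARNING, &off, sizeof(off));
        add_rta(&r.nh, IFLA_BRPORT_UNICAST_FLOOD, &off, sizeof(off));
        protinfo->rta_len = (char *)&r.nh + r.nh.nlmsg_len - (char *)protinfo;

        if (sendto(fd, &r, r.nh.nlmsg_len, 0,
                   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
                perror("sendto");
        close(fd);
        return 0;
}

This is the same kind of RTM_SETLINK/PF_BRIDGE message that iproute2's bridge link command builds, assuming an iproute2 version that has grown keywords for these two attributes.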
@@ -158,6 +158,8 @@ struct net_bridge_port
 #define BR_ROOT_BLOCK           0x00000004
 #define BR_MULTICAST_FAST_LEAVE 0x00000008
 #define BR_ADMIN_COST           0x00000010
+#define BR_LEARNING             0x00000020
+#define BR_FLOOD                0x00000040
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
         u32                             multicast_startup_queries_sent;
@@ -413,9 +415,10 @@ extern int br_dev_queue_push_xmit(struct sk_buff *skb);
 extern void br_forward(const struct net_bridge_port *to,
                 struct sk_buff *skb, struct sk_buff *skb0);
 extern int br_forward_finish(struct sk_buff *skb);
-extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb);
+extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb,
+                             bool unicast);
 extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
-                      struct sk_buff *skb2);
+                             struct sk_buff *skb2, bool unicast);
 
 /* br_if.c */
 extern void br_port_carrier_check(struct net_bridge_port *p);
@@ -158,6 +158,8 @@ static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush);
 BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE);
 BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD);
 BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK);
+BRPORT_ATTR_FLAG(learning, BR_LEARNING);
+BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD);
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
@@ -195,6 +197,8 @@ static const struct brport_attribute *brport_attrs[] = {
         &brport_attr_hairpin_mode,
         &brport_attr_bpdu_guard,
         &brport_attr_root_block,
+        &brport_attr_learning,
+        &brport_attr_unicast_flood,
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
         &brport_attr_multicast_router,
         &brport_attr_multicast_fast_leave,
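[Editor's note, not part of the series] The BRPORT_ATTR_FLAG(learning, ...) and BRPORT_ATTR_FLAG(unicast_flood, ...) entries above also surface the flags as per-port sysfs files alongside the existing brport attributes, so they can be toggled without netlink.  A minimal sketch; "eth0" is again a placeholder for a port already enslaved to a bridge.

/* Sketch: flip the new per-port knobs through the brport sysfs files. */
#include <stdio.h>

static int brport_set(const char *port, const char *attr, int val)
{
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/class/net/%s/brport/%s", port, attr);
        f = fopen(path, "w");
        if (!f) {
                perror(path);
                return -1;
        }
        fprintf(f, "%d\n", val);
        return fclose(f);
}

int main(void)
{
        /* Disable MAC learning and unknown-unicast flooding on one port. */
        brport_set("eth0", "learning", 0);
        brport_set("eth0", "unicast_flood", 0);
        return 0;
}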