Commit 738fea32 authored by David S. Miller

Merge branch 'bonding-report-transmit-status-to-callers'

Eric Dumazet says:

====================
bonding: report transmit status to callers

The first patches clean up netpoll and make sure it provides tx status to its users.

The last patch changes bonding so that it no longer pretends packets were sent without error.

By providing a more accurate status, the TCP stack can avoid adding more
packets if the slave qdisc is already full.

This came up while testing the latest horizon feature in sch_fq with
very low pacing rate flows, but it should benefit hosts under stress.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3a13f98b ae46f184
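To make the calling convention concrete, here is a minimal, hedged sketch in plain C (all names such as demo_tx_status, demo_slave, demo_queue_xmit and demo_bond_xmit are hypothetical and are not the kernel API): the bonding-style xmit handler forwards the status it gets from the slave queueing helper instead of unconditionally claiming success, so a caller can see drops and back off.

/* Hedged illustration of "report transmit status to callers": the
 * bonding-like handler below forwards the status from the slave
 * queueing helper instead of always returning "OK". Only the calling
 * convention mirrors the patch series; everything else is made up.
 */
#include <stdio.h>

enum demo_tx_status { DEMO_TX_OK = 0, DEMO_XMIT_DROP = 1, DEMO_TX_BUSY = 2 };

struct demo_slave { int qlen, qcap; };

/* Stand-in for the slave queueing path: fails once the queue is full. */
static enum demo_tx_status demo_queue_xmit(struct demo_slave *s)
{
    if (s->qlen >= s->qcap)
        return DEMO_XMIT_DROP;  /* congestion is now visible to the caller */
    s->qlen++;
    return DEMO_TX_OK;
}

/* Before the series: pretend success. After: propagate the real status. */
static enum demo_tx_status demo_bond_xmit(struct demo_slave *s)
{
    if (!s)
        return DEMO_XMIT_DROP;  /* no usable slave, frame dropped */
    return demo_queue_xmit(s);  /* report what actually happened */
}

int main(void)
{
    struct demo_slave slave = { .qlen = 0, .qcap = 2 };

    for (int i = 0; i < 4; i++)
        printf("pkt %d -> status %d\n", i, demo_bond_xmit(&slave));
    return 0;
}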
@@ -1318,8 +1318,7 @@ static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
                     tx_slave->dev->dev_addr);
         }
 
-        bond_dev_queue_xmit(bond, skb, tx_slave->dev);
-        goto out;
+        return bond_dev_queue_xmit(bond, skb, tx_slave->dev);
     }
 
     if (tx_slave && bond->params.tlb_dynamic_lb) {
@@ -1329,9 +1328,7 @@ static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
     }
 
     /* no suitable interface, frame not sent */
-    bond_tx_drop(bond->dev, skb);
-out:
-    return NETDEV_TX_OK;
+    return bond_tx_drop(bond->dev, skb);
 }
 
 netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
@@ -287,7 +287,7 @@ const char *bond_mode_name(int mode)
  * @skb: hw accel VLAN tagged skb to transmit
  * @slave_dev: slave that is supposed to xmit this skbuff
  */
-void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
+netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
              struct net_device *slave_dev)
 {
     skb->dev = slave_dev;
@@ -297,9 +297,9 @@ void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
     skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
 
     if (unlikely(netpoll_tx_running(bond->dev)))
-        bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
-    else
-        dev_queue_xmit(skb);
+        return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
+
+    return dev_queue_xmit(skb);
 }
 
 /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
@@ -3932,7 +3932,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
  * it fails, it tries to find the first available slave for transmission.
  * The skb is consumed in all cases, thus the function is void.
  */
-static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+static netdev_tx_t bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
 {
     struct list_head *iter;
     struct slave *slave;
@@ -3941,10 +3941,8 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
     /* Here we start from the slave with slave_id */
     bond_for_each_slave_rcu(bond, slave, iter) {
         if (--i < 0) {
-            if (bond_slave_can_tx(slave)) {
-                bond_dev_queue_xmit(bond, skb, slave->dev);
-                return;
-            }
+            if (bond_slave_can_tx(slave))
+                return bond_dev_queue_xmit(bond, skb, slave->dev);
         }
     }
 
@@ -3953,13 +3951,11 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
     bond_for_each_slave_rcu(bond, slave, iter) {
         if (--i < 0)
             break;
-        if (bond_slave_can_tx(slave)) {
-            bond_dev_queue_xmit(bond, skb, slave->dev);
-            return;
-        }
+        if (bond_slave_can_tx(slave))
+            return bond_dev_queue_xmit(bond, skb, slave->dev);
     }
     /* no slave that can tx has been found */
-    bond_tx_drop(bond->dev, skb);
+    return bond_tx_drop(bond->dev, skb);
 }
 
 /**
@@ -4020,10 +4016,8 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
         if (iph->protocol == IPPROTO_IGMP) {
             slave = rcu_dereference(bond->curr_active_slave);
             if (slave)
-                bond_dev_queue_xmit(bond, skb, slave->dev);
-            else
-                bond_xmit_slave_id(bond, skb, 0);
-            return NETDEV_TX_OK;
+                return bond_dev_queue_xmit(bond, skb, slave->dev);
+            return bond_xmit_slave_id(bond, skb, 0);
         }
     }
 
@@ -4031,11 +4025,9 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
     slave_cnt = READ_ONCE(bond->slave_cnt);
     if (likely(slave_cnt)) {
         slave_id = bond_rr_gen_slave_id(bond);
-        bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
-    } else {
-        bond_tx_drop(bond_dev, skb);
+        return bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
     }
-    return NETDEV_TX_OK;
+    return bond_tx_drop(bond_dev, skb);
 }
 
 /* In active-backup mode, we know that bond->curr_active_slave is always valid if
@@ -4049,11 +4041,9 @@ static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
 
     slave = rcu_dereference(bond->curr_active_slave);
     if (slave)
-        bond_dev_queue_xmit(bond, skb, slave->dev);
-    else
-        bond_tx_drop(bond_dev, skb);
+        return bond_dev_queue_xmit(bond, skb, slave->dev);
 
-    return NETDEV_TX_OK;
+    return bond_tx_drop(bond_dev, skb);
 }
 
 /* Use this to update slave_array when (a) it's not appropriate to update
@@ -4196,12 +4186,9 @@ static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
     count = slaves ? READ_ONCE(slaves->count) : 0;
     if (likely(count)) {
         slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
-        bond_dev_queue_xmit(bond, skb, slave->dev);
-    } else {
-        bond_tx_drop(dev, skb);
+        return bond_dev_queue_xmit(bond, skb, slave->dev);
     }
-
-    return NETDEV_TX_OK;
+    return bond_tx_drop(dev, skb);
 }
 
 /* in broadcast mode, we send everything to all usable interfaces. */
@@ -4227,11 +4214,9 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
         }
     }
     if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
-        bond_dev_queue_xmit(bond, skb, slave->dev);
-    else
-        bond_tx_drop(bond_dev, skb);
+        return bond_dev_queue_xmit(bond, skb, slave->dev);
 
-    return NETDEV_TX_OK;
+    return bond_tx_drop(bond_dev, skb);
 }
 
 /*------------------------- Device initialization ---------------------------*/
@@ -4310,8 +4295,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
         /* Should never happen, mode already checked */
         netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
         WARN_ON_ONCE(1);
-        bond_tx_drop(dev, skb);
-        return NETDEV_TX_OK;
+        return bond_tx_drop(dev, skb);
     }
 }
 
@@ -4330,7 +4314,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
     if (bond_has_slaves(bond))
         ret = __bond_start_xmit(skb, dev);
     else
-        bond_tx_drop(dev, skb);
+        ret = bond_tx_drop(dev, skb);
     rcu_read_unlock();
 
     return ret;
@@ -542,12 +542,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_POLL_CONTROLLER
-    if (vlan->netpoll)
-        netpoll_send_skb(vlan->netpoll, skb);
+    return netpoll_send_skb(vlan->netpoll, skb);
 #else
     BUG();
-#endif
     return NETDEV_TX_OK;
+#endif
 }
 
 static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
@@ -102,10 +102,7 @@ static inline bool team_port_dev_txable(const struct net_device *port_dev)
 static inline void team_netpoll_send_skb(struct team_port *port,
                      struct sk_buff *skb)
 {
-    struct netpoll *np = port->np;
-
-    if (np)
-        netpoll_send_skb(np, skb);
+    netpoll_send_skb(port->np, skb);
 }
 #else
 static inline void team_netpoll_send_skb(struct team_port *port,
@@ -63,15 +63,7 @@ int netpoll_setup(struct netpoll *np);
 void __netpoll_cleanup(struct netpoll *np);
 void __netpoll_free(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
-void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
-                 struct net_device *dev);
-static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
-{
-    unsigned long flags;
-    local_irq_save(flags);
-    netpoll_send_skb_on_dev(np, skb, np->dev);
-    local_irq_restore(flags);
-}
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
 
 #ifdef CONFIG_NETPOLL
 static inline void *netpoll_poll_lock(struct napi_struct *napi)
@@ -504,18 +504,17 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static inline void bond_netpoll_send_skb(const struct slave *slave,
+static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
                      struct sk_buff *skb)
 {
-    struct netpoll *np = slave->np;
-
-    if (np)
-        netpoll_send_skb(np, skb);
+    return netpoll_send_skb(slave->np, skb);
 }
 #else
-static inline void bond_netpoll_send_skb(const struct slave *slave,
+static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
                      struct sk_buff *skb)
 {
+    BUG();
+    return NETDEV_TX_OK;
 }
 #endif
 
@@ -609,7 +608,7 @@ struct bond_net {
 };
 
 int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
 int bond_create(struct net *net, const char *name);
 int bond_create_sysfs(struct bond_net *net);
 void bond_destroy_sysfs(struct bond_net *net);
@@ -742,10 +741,11 @@ extern struct bond_parm_tbl ad_select_tbl[];
 /* exported from bond_netlink.c */
 extern struct rtnl_link_ops bond_link_ops;
 
-static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
+static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
 {
     atomic_long_inc(&dev->tx_dropped);
     dev_kfree_skb_any(skb);
+    return NET_XMIT_DROP;
 }
 
 #endif /* _NET_BONDING_H */
@@ -88,12 +88,11 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_POLL_CONTROLLER
-    if (vlan->netpoll)
-        netpoll_send_skb(vlan->netpoll, skb);
+    return netpoll_send_skb(vlan->netpoll, skb);
 #else
     BUG();
-#endif
     return NETDEV_TX_OK;
+#endif
 }
 
 static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
@@ -598,10 +598,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
 static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
                        struct sk_buff *skb)
 {
-    struct netpoll *np = p->np;
-
-    if (np)
-        netpoll_send_skb(np, skb);
+    netpoll_send_skb(p->np, skb);
 }
 
 int br_netpoll_enable(struct net_bridge_port *p);
@@ -305,20 +305,22 @@ static int netpoll_owner_active(struct net_device *dev)
 }
 
 /* call with IRQ disabled */
-void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
-                 struct net_device *dev)
+static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
     netdev_tx_t status = NETDEV_TX_BUSY;
+    struct net_device *dev;
     unsigned long tries;
     /* It is up to the caller to keep npinfo alive. */
     struct netpoll_info *npinfo;
 
     lockdep_assert_irqs_disabled();
 
-    npinfo = rcu_dereference_bh(np->dev->npinfo);
+    dev = np->dev;
+    npinfo = rcu_dereference_bh(dev->npinfo);
+
     if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
         dev_kfree_skb_irq(skb);
-        return;
+        return NET_XMIT_DROP;
     }
 
     /* don't get messages out of order, and no recursion */
@@ -357,8 +359,25 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
         skb_queue_tail(&npinfo->txq, skb);
         schedule_delayed_work(&npinfo->tx_work,0);
     }
+    return NETDEV_TX_OK;
 }
-EXPORT_SYMBOL(netpoll_send_skb_on_dev);
+
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+{
+    unsigned long flags;
+    netdev_tx_t ret;
+
+    if (unlikely(!np)) {
+        dev_kfree_skb_irq(skb);
+        ret = NET_XMIT_DROP;
+    } else {
+        local_irq_save(flags);
+        ret = __netpoll_send_skb(np, skb);
+        local_irq_restore(flags);
+    }
+    return ret;
+}
+EXPORT_SYMBOL(netpoll_send_skb);
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
@@ -445,12 +445,11 @@ static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
 #ifdef CONFIG_NET_POLL_CONTROLLER
     struct dsa_slave_priv *p = netdev_priv(dev);
 
-    if (p->netpoll)
-        netpoll_send_skb(p->netpoll, skb);
+    return netpoll_send_skb(p->netpoll, skb);
 #else
     BUG();
-#endif
     return NETDEV_TX_OK;
+#endif
 }
 
 static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,