Commit 451c9fc7 authored by Jason Gunthorpe

Merge branch 'mellanox/mlx5-next' into rdma.git for-next

From the mlx5-next branch at
  git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Required for dependencies in the following patches

* mellanox/mlx5-next:
  net/mlx5: Add support to get lag physical port
  net/mlx5: Change lag mutex lock to spin lock
  bonding: Implement ndo_get_xmit_slave
  bonding: Add array of all slaves
  bonding: Add function to get the xmit slave in active-backup mode
  bonding: Add helper function to get the xmit slave in rr mode
  bonding: Add helper function to get the xmit slave based on hash
  bonding/alb: Add helper functions to get the xmit slave
  bonding: Rename slave_arr to usable_slaves
  bonding: Export skip slave logic to function
  net/core: Introduce netdev_get_xmit_slave
parents 0eacc574 c6bc6041
@@ -1334,11 +1334,11 @@ static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
return NETDEV_TX_OK;
}
netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
struct sk_buff *skb)
{
struct bonding *bond = netdev_priv(bond_dev);
struct ethhdr *eth_data;
struct slave *tx_slave = NULL;
struct ethhdr *eth_data;
u32 hash_index;
skb_reset_mac_header(skb);
@@ -1360,7 +1360,7 @@ netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct bond_up_slave *slaves;
unsigned int count;
slaves = rcu_dereference(bond->slave_arr);
slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[hash_index %
@@ -1369,20 +1369,29 @@ netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
}
return bond_do_alb_xmit(skb, bond, tx_slave);
return tx_slave;
}
netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct ethhdr *eth_data;
struct slave *tx_slave;
tx_slave = bond_xmit_tlb_slave_get(bond, skb);
return bond_do_alb_xmit(skb, bond, tx_slave);
}
struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
struct sk_buff *skb)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct slave *tx_slave = NULL;
static const __be32 ip_bcast = htonl(0xffffffff);
int hash_size = 0;
struct slave *tx_slave = NULL;
const u8 *hash_start = NULL;
bool do_tx_balance = true;
struct ethhdr *eth_data;
u32 hash_index = 0;
const u8 *hash_start = NULL;
int hash_size = 0;
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
@@ -1494,14 +1503,22 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct bond_up_slave *slaves;
unsigned int count;
slaves = rcu_dereference(bond->slave_arr);
slaves = rcu_dereference(bond->usable_slaves);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
count];
}
}
return tx_slave;
}
netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *tx_slave = NULL;
tx_slave = bond_xmit_alb_slave_get(bond, skb);
return bond_do_alb_xmit(skb, bond, tx_slave);
}
......
@@ -3923,16 +3923,15 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
}
/**
* bond_xmit_slave_id - transmit skb through slave with slave_id
* bond_get_slave_by_id - get xmit slave with slave_id
* @bond: bonding device that is transmitting
* @skb: buffer to transmit
* @slave_id: slave id up to slave_cnt-1 through which to transmit
*
* This function tries to transmit through slave with slave_id but in case
* This function tries to get slave with slave_id but in case
* it fails, it tries to find the first available slave for transmission.
* The skb is consumed in all cases, thus the function is void.
*/
static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
static struct slave *bond_get_slave_by_id(struct bonding *bond,
int slave_id)
{
struct list_head *iter;
struct slave *slave;
@@ -3941,10 +3940,8 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
/* Here we start from the slave with slave_id */
bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0) {
if (bond_slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
return;
}
if (bond_slave_can_tx(slave))
return slave;
}
}
@@ -3953,13 +3950,11 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0)
break;
if (bond_slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
return;
}
if (bond_slave_can_tx(slave))
return slave;
}
/* no slave that can tx has been found */
bond_tx_drop(bond->dev, skb);
return NULL;
}
/**
@@ -3995,10 +3990,9 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
return slave_id;
}
static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
struct net_device *bond_dev)
static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
struct sk_buff *skb)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
int slave_cnt;
u32 slave_id;
@@ -4020,24 +4014,40 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
if (iph->protocol == IPPROTO_IGMP) {
slave = rcu_dereference(bond->curr_active_slave);
if (slave)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
bond_xmit_slave_id(bond, skb, 0);
return NETDEV_TX_OK;
return slave;
return bond_get_slave_by_id(bond, 0);
}
}
non_igmp:
slave_cnt = READ_ONCE(bond->slave_cnt);
if (likely(slave_cnt)) {
slave_id = bond_rr_gen_slave_id(bond);
bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
} else {
bond_tx_drop(bond_dev, skb);
slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
return bond_get_slave_by_id(bond, slave_id);
}
return NULL;
}
static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
slave = bond_xmit_roundrobin_slave_get(bond, skb);
if (slave)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
bond_tx_drop(bond_dev, skb);
return NETDEV_TX_OK;
}
static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond,
struct sk_buff *skb)
{
return rcu_dereference(bond->curr_active_slave);
}
/* In active-backup mode, we know that bond->curr_active_slave is always valid if
* the bond has a usable interface.
*/
@@ -4047,7 +4057,7 @@ static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
slave = rcu_dereference(bond->curr_active_slave);
slave = bond_xmit_activebackup_slave_get(bond, skb);
if (slave)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
@@ -4087,6 +4097,61 @@ static void bond_slave_arr_handler(struct work_struct *work)
bond_slave_arr_work_rearm(bond, 1);
}
static void bond_skip_slave(struct bond_up_slave *slaves,
struct slave *skipslave)
{
int idx;
/* Rare situation where caller has asked to skip a specific
* slave but allocation failed (most likely!). BTW this is
* only possible when the call is initiated from
__bond_release_one(). In this situation, overwrite the
* skipslave entry in the array with the last entry from the
* array to avoid a situation where the xmit path may choose
* this to-be-skipped slave to send a packet out.
*/
for (idx = 0; slaves && idx < slaves->count; idx++) {
if (skipslave == slaves->arr[idx]) {
slaves->arr[idx] =
slaves->arr[slaves->count - 1];
slaves->count--;
break;
}
}
}
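
The overwrite-with-last-entry trick removes an element in O(1) without shifting the rest of the array; element order is not preserved, which is acceptable here because the xmit path only indexes the array modulo its count. A minimal standalone sketch of the same idea (toy userspace code, not part of this commit):

#include <stdio.h>

/* Remove the first occurrence of 'victim' by overwriting it with the
 * last element, the same move bond_skip_slave() makes on slave pointers.
 */
static void swap_remove(int *arr, int *count, int victim)
{
	int idx;

	for (idx = 0; idx < *count; idx++) {
		if (arr[idx] == victim) {
			arr[idx] = arr[*count - 1];
			(*count)--;
			break;
		}
	}
}

int main(void)
{
	int arr[] = { 10, 20, 30, 40 };
	int count = 4;
	int idx;

	swap_remove(arr, &count, 20);
	for (idx = 0; idx < count; idx++)
		printf("%d ", arr[idx]);	/* prints: 10 40 30 */
	printf("\n");
	return 0;
}
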
static void bond_set_slave_arr(struct bonding *bond,
struct bond_up_slave *usable_slaves,
struct bond_up_slave *all_slaves)
{
struct bond_up_slave *usable, *all;
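/* Publish the new arrays with rcu_assign_pointer() and defer freeing
* the old ones with kfree_rcu() until all current RCU readers are done.
*/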
usable = rtnl_dereference(bond->usable_slaves);
rcu_assign_pointer(bond->usable_slaves, usable_slaves);
kfree_rcu(usable, rcu);
all = rtnl_dereference(bond->all_slaves);
rcu_assign_pointer(bond->all_slaves, all_slaves);
kfree_rcu(all, rcu);
}
static void bond_reset_slave_arr(struct bonding *bond)
{
struct bond_up_slave *usable, *all;
usable = rtnl_dereference(bond->usable_slaves);
if (usable) {
RCU_INIT_POINTER(bond->usable_slaves, NULL);
kfree_rcu(usable, rcu);
}
all = rtnl_dereference(bond->all_slaves);
if (all) {
RCU_INIT_POINTER(bond->all_slaves, NULL);
kfree_rcu(all, rcu);
}
}
/* Build the usable slaves array in control path for modes that use xmit-hash
* to determine the slave interface -
* (a) BOND_MODE_8023AD
@@ -4097,9 +4162,9 @@ static void bond_slave_arr_handler(struct work_struct *work)
*/
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
{
struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
struct slave *slave;
struct list_head *iter;
struct bond_up_slave *new_arr, *old_arr;
int agg_id = 0;
int ret = 0;
@@ -4107,11 +4172,12 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
WARN_ON(lockdep_is_held(&bond->mode_lock));
#endif
new_arr = kzalloc(offsetof(struct bond_up_slave, arr[bond->slave_cnt]),
GFP_KERNEL);
if (!new_arr) {
usable_slaves = kzalloc(struct_size(usable_slaves, arr,
bond->slave_cnt), GFP_KERNEL);
all_slaves = kzalloc(struct_size(all_slaves, arr,
bond->slave_cnt), GFP_KERNEL);
if (!usable_slaves || !all_slaves) {
ret = -ENOMEM;
pr_err("Failed to build slave-array.\n");
goto out;
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -4119,20 +4185,19 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("bond_3ad_get_active_agg_info failed\n");
kfree_rcu(new_arr, rcu);
/* No active aggregator means it's not safe to use
* the previous array.
*/
old_arr = rtnl_dereference(bond->slave_arr);
if (old_arr) {
RCU_INIT_POINTER(bond->slave_arr, NULL);
kfree_rcu(old_arr, rcu);
}
bond_reset_slave_arr(bond);
goto out;
}
agg_id = ad_info.aggregator_id;
}
bond_for_each_slave(bond, slave, iter) {
if (skipslave == slave)
continue;
all_slaves->arr[all_slaves->count++] = slave;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct aggregator *agg;
@@ -4142,44 +4207,45 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
}
if (!bond_slave_can_tx(slave))
continue;
if (skipslave == slave)
continue;
slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
new_arr->count);
usable_slaves->count);
new_arr->arr[new_arr->count++] = slave;
usable_slaves->arr[usable_slaves->count++] = slave;
}
old_arr = rtnl_dereference(bond->slave_arr);
rcu_assign_pointer(bond->slave_arr, new_arr);
if (old_arr)
kfree_rcu(old_arr, rcu);
bond_set_slave_arr(bond, usable_slaves, all_slaves);
return ret;
out:
if (ret != 0 && skipslave) {
int idx;
/* Rare situation where caller has asked to skip a specific
* slave but allocation failed (most likely!). BTW this is
* only possible when the call is initiated from
* __bond_release_one(). In this situation, overwrite the
* skipslave entry in the array with the last entry from the
* array to avoid a situation where the xmit path may choose
* this to-be-skipped slave to send a packet out.
*/
old_arr = rtnl_dereference(bond->slave_arr);
for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
if (skipslave == old_arr->arr[idx]) {
old_arr->arr[idx] =
old_arr->arr[old_arr->count-1];
old_arr->count--;
break;
}
}
bond_skip_slave(rtnl_dereference(bond->all_slaves),
skipslave);
bond_skip_slave(rtnl_dereference(bond->usable_slaves),
skipslave);
}
kfree_rcu(all_slaves, rcu);
kfree_rcu(usable_slaves, rcu);
return ret;
}
static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
struct sk_buff *skb,
struct bond_up_slave *slaves)
{
struct slave *slave;
unsigned int count;
u32 hash;
hash = bond_xmit_hash(bond, skb);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (unlikely(!count))
return NULL;
slave = slaves->arr[hash % count];
return slave;
}
/* Use this Xmit function for 3AD as well as XOR modes. The current
* usable slave array is formed in the control path. The xmit function
* just calculates hash and sends the packet out.
@@ -4188,18 +4254,15 @@ static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
struct slave *slave;
struct bond_up_slave *slaves;
unsigned int count;
struct slave *slave;
slaves = rcu_dereference(bond->slave_arr);
count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count)) {
slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
slaves = rcu_dereference(bond->usable_slaves);
slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
if (likely(slave))
bond_dev_queue_xmit(bond, skb, slave->dev);
} else {
else
bond_tx_drop(dev, skb);
}
return NETDEV_TX_OK;
}
@@ -4284,6 +4347,48 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
struct sk_buff *skb,
bool all_slaves)
{
struct bonding *bond = netdev_priv(master_dev);
struct bond_up_slave *slaves;
struct slave *slave = NULL;
switch (BOND_MODE(bond)) {
case BOND_MODE_ROUNDROBIN:
slave = bond_xmit_roundrobin_slave_get(bond, skb);
break;
case BOND_MODE_ACTIVEBACKUP:
slave = bond_xmit_activebackup_slave_get(bond, skb);
break;
case BOND_MODE_8023AD:
case BOND_MODE_XOR:
if (all_slaves)
slaves = rcu_dereference(bond->all_slaves);
else
slaves = rcu_dereference(bond->usable_slaves);
slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
break;
case BOND_MODE_BROADCAST:
break;
case BOND_MODE_ALB:
slave = bond_xmit_alb_slave_get(bond, skb);
break;
case BOND_MODE_TLB:
slave = bond_xmit_tlb_slave_get(bond, skb);
break;
default:
/* Should never happen, mode already checked */
WARN_ONCE(true, "Unknown bonding mode");
break;
}
if (slave)
return slave->dev;
return NULL;
}
static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
@@ -4405,6 +4510,7 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
.ndo_features_check = passthru_features_check,
.ndo_get_xmit_slave = bond_xmit_get_slave,
};
static const struct device_type bond_type = {
@@ -4472,9 +4578,9 @@ void bond_setup(struct net_device *bond_dev)
static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct bond_up_slave *usable, *all;
struct list_head *iter;
struct slave *slave;
struct bond_up_slave *arr;
bond_netpoll_cleanup(bond_dev);
@@ -4483,10 +4589,16 @@ static void bond_uninit(struct net_device *bond_dev)
__bond_release_one(bond_dev, slave->dev, true, true);
netdev_info(bond_dev, "Released all slaves\n");
arr = rtnl_dereference(bond->slave_arr);
if (arr) {
RCU_INIT_POINTER(bond->slave_arr, NULL);
kfree_rcu(arr, rcu);
usable = rtnl_dereference(bond->usable_slaves);
if (usable) {
RCU_INIT_POINTER(bond->usable_slaves, NULL);
kfree_rcu(usable, rcu);
}
all = rtnl_dereference(bond->all_slaves);
if (all) {
RCU_INIT_POINTER(bond->all_slaves, NULL);
kfree_rcu(all, rcu);
}
list_del(&bond->bond_list);
......
@@ -42,7 +42,7 @@
* Beware of lock dependencies (preferably, no locks should be acquired
* under it).
*/
static DEFINE_MUTEX(lag_mutex);
static DEFINE_SPINLOCK(lag_lock);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
u8 remap_port2)
@@ -274,9 +274,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!dev0 || !dev1)
return;
mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
tracker = ldev->tracker;
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@@ -458,9 +458,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
break;
}
mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev->tracker = tracker;
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);
if (changed)
mlx5_queue_bond_work(ldev, 0);
@@ -502,7 +502,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
if (fn >= MLX5_MAX_PORTS)
return;
mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev->pf[fn].dev = dev;
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
@@ -510,7 +510,7 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
dev->priv.lag = ldev;
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);
}
static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -525,11 +525,11 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
if (i == MLX5_MAX_PORTS)
return;
mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
dev->priv.lag = NULL;
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);
}
/* Must be called with intf_mutex held */
@@ -607,10 +607,10 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);
return res;
}
@@ -621,10 +621,10 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_active(ldev);
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);
return res;
}
@@ -635,10 +635,10 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);
return res;
}
@@ -664,7 +664,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -681,12 +681,36 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
dev_hold(ndev);
unlock:
mutex_unlock(&lag_mutex);
spin_unlock(&lag_lock);
return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave)
{
struct mlx5_lag *ldev;
u8 port = 0;
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
if (ldev->pf[MLX5_LAG_P1].netdev == slave)
port = MLX5_LAG_P1;
else
port = MLX5_LAG_P2;
port = ldev->v2p_map[port];
unlock:
spin_unlock(&lag_lock);
return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
@@ -723,7 +747,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
memset(values, 0, sizeof(*values) * num_counters);
mutex_lock(&lag_mutex);
spin_lock(&lag_lock);
ldev = mlx5_lag_dev_get(dev);
if (ldev && __mlx5_lag_is_roce(ldev)) {
num_ports = MLX5_MAX_PORTS;
@@ -733,6 +757,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
}
spin_unlock(&lag_lock);
for (i = 0; i < num_ports; ++i) {
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
@@ -742,14 +767,13 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
out);
if (ret)
goto unlock;
goto free;
for (j = 0; j < num_counters; ++j)
values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
}
unlock:
mutex_unlock(&lag_mutex);
free:
kvfree(out);
return ret;
}
......
@@ -1074,6 +1074,8 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
int num_counters,
......
@@ -1146,6 +1146,12 @@ struct netdev_net_notifier {
* int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
* Called to release previously enslaved netdev.
*
* struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
* struct sk_buff *skb,
* bool all_slaves);
* Get the xmit slave of the master device. If all_slaves is true, the
* function assumes all the slaves can transmit.
*
* Feature/offload setting functions.
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
* netdev_features_t features);
@@ -1389,6 +1395,9 @@ struct net_device_ops {
struct netlink_ext_ack *extack);
int (*ndo_del_slave)(struct net_device *dev,
struct net_device *slave_dev);
struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves);
netdev_features_t (*ndo_fix_features)(struct net_device *dev,
netdev_features_t features);
int (*ndo_set_features)(struct net_device *dev,
@@ -2731,6 +2740,9 @@ void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
......
@@ -158,6 +158,10 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
struct sk_buff *skb);
struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
struct sk_buff *skb);
void bond_alb_monitor(struct work_struct *);
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
......
@@ -200,7 +200,8 @@ struct bonding {
struct slave __rcu *curr_active_slave;
struct slave __rcu *current_arp_slave;
struct slave __rcu *primary_slave;
struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
struct bond_up_slave __rcu *usable_slaves;
struct bond_up_slave __rcu *all_slaves;
bool force_primary;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
int (*recv_probe)(const struct sk_buff *, struct bonding *,
......
@@ -7785,6 +7785,28 @@ void netdev_bonding_info_change(struct net_device *dev,
}
EXPORT_SYMBOL(netdev_bonding_info_change);
/**
* netdev_get_xmit_slave - Get the xmit slave of the master device
* @dev: master device
* @skb: The packet
* @all_slaves: assume all the slaves are active
*
* The reference counters are not incremented so the caller must be
* careful with locks. The caller must hold RCU lock.
* %NULL is returned if no slave is found.
*/
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves)
{
const struct net_device_ops *ops = dev->netdev_ops;
if (!ops->ndo_get_xmit_slave)
return NULL;
return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
}
EXPORT_SYMBOL(netdev_get_xmit_slave);
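
Taken together, the new netdev_get_xmit_slave() core helper and the mlx5_lag_get_slave_port() export let a driver resolve which physical LAG port a given packet would leave on. A hypothetical consumer sketch follows (the function name and context are illustrative only; the real users are the follow-up RDMA patches this merge prepares for):

/* Illustrative only, not part of this merge: map an egress skb on a
 * bond master to the mlx5 physical port it would currently use.
 * Passing all_slaves=true asks the bond to assume every slave can
 * transmit, per the ndo_get_xmit_slave documentation.
 */
static u8 flow_get_phys_port(struct mlx5_core_dev *mdev,
			     struct net_device *master,
			     struct sk_buff *skb)
{
	struct net_device *slave;
	u8 port = 0;

	rcu_read_lock();	/* netdev_get_xmit_slave() requires RCU */
	slave = netdev_get_xmit_slave(master, skb, true);
	if (slave)
		port = mlx5_lag_get_slave_port(mdev, slave);
	rcu_read_unlock();

	return port;	/* 0 when no slave or LAG is not in RoCE mode */
}
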
static void netdev_adjacent_add_links(struct net_device *dev)
{
struct netdev_adjacent *iter;
......