Commit d003462a authored by Ido Schimmel, committed by David S. Miller

mlxsw: Simplify mlxsw_sx_port_xmit function

Previously, we only checked whether the transmission queue was full in the
middle of the xmit function. This led to complex logic, since we sometimes
need to reallocate the headroom for our Tx header.

Allow the switch driver to check whether the transmission queue is full
before sending the packet, and remove this complex logic.
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7b7b9cff
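
For orientation before the per-file hunks, here is a condensed, hypothetical sketch of the xmit flow this patch enables: probe the transmit queue first, and only afterwards reallocate headroom and build the Tx header. The helpers it calls (mlxsw_core_skb_transmit_busy, mlxsw_core_skb_transmit, mlxsw_sx_txhdr_construct, MLXSW_TXHDR_LEN) come from this patch and the existing driver; example_port_xmit itself is a made-up wrapper that omits the per-CPU statistics handling of the real mlxsw_sx_port_xmit, shown in the last hunk below.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
/* Also assumes the mlxsw core header providing struct mlxsw_tx_info and
 * mlxsw_core_skb_transmit{,_busy}(), plus the driver's Tx header helpers.
 */

static netdev_tx_t example_port_xmit(void *priv, struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	/* Probe the send queue up front; let the stack requeue if it is full. */
	if (mlxsw_core_skb_transmit_busy(priv, tx_info))
		return NETDEV_TX_BUSY;

	/* Make room for the Tx header only after we know we can send. */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;	/* drop on allocation failure */
		}
	}

	mlxsw_sx_txhdr_construct(skb, tx_info);
	/* A racing sender may still fill the queue here; in that unlikely
	 * case the packet is simply dropped.
	 */
	mlxsw_core_skb_transmit(priv, skb, tx_info);
	return NETDEV_TX_OK;
}
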
@@ -865,6 +865,16 @@ static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
 	return container_of(driver_priv, struct mlxsw_core, driver_priv);
 }
 
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+				  const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
+						  tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
+
 int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
 			    const struct mlxsw_tx_info *tx_info)
 {

@@ -73,6 +73,9 @@ struct mlxsw_tx_info {
 	bool is_emad;
 };
 
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+				  const struct mlxsw_tx_info *tx_info);
+
 int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
 			    const struct mlxsw_tx_info *tx_info);

@@ -177,6 +180,8 @@ struct mlxsw_bus {
 	int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
 		    const struct mlxsw_config_profile *profile);
 	void (*fini)(void *bus_priv);
+	bool (*skb_transmit_busy)(void *bus_priv,
+				  const struct mlxsw_tx_info *tx_info);
 	int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
 			    const struct mlxsw_tx_info *tx_info);
 	int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,

@@ -1443,6 +1443,15 @@ mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
 	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
 }
 
+static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
+					const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+
+	return !mlxsw_pci_queue_elem_info_producer_get(q);
+}
+
 static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
 				  const struct mlxsw_tx_info *tx_info)
 {
@@ -1625,11 +1634,12 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
 }
 
 static const struct mlxsw_bus mlxsw_pci_bus = {
-	.kind		= "pci",
-	.init		= mlxsw_pci_init,
-	.fini		= mlxsw_pci_fini,
-	.skb_transmit	= mlxsw_pci_skb_transmit,
-	.cmd_exec	= mlxsw_pci_cmd_exec,
+	.kind			= "pci",
+	.init			= mlxsw_pci_init,
+	.fini			= mlxsw_pci_fini,
+	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
+	.skb_transmit		= mlxsw_pci_skb_transmit,
+	.cmd_exec		= mlxsw_pci_cmd_exec,
 };
 
 static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)

@@ -300,31 +300,26 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
 		.local_port = mlxsw_sx_port->local_port,
 		.is_emad = false,
 	};
-	struct sk_buff *skb_old = NULL;
 	int err;
 
+	if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+		return NETDEV_TX_BUSY;
+
 	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
-		struct sk_buff *skb_new;
+		struct sk_buff *skb_orig = skb;
 
-		skb_old = skb;
-		skb_new = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
-		if (!skb_new) {
+		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+		if (!skb) {
 			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
-			dev_kfree_skb_any(skb_old);
+			dev_kfree_skb_any(skb_orig);
 			return NETDEV_TX_OK;
 		}
-		skb = skb_new;
 	}
 	mlxsw_sx_txhdr_construct(skb, &tx_info);
+	/* Due to a race we might fail here because of a full queue. In that
+	 * unlikely case we simply drop the packet.
+	 */
 	err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
-	if (err == -EAGAIN) {
-		if (skb_old)
-			dev_kfree_skb_any(skb);
-		return NETDEV_TX_BUSY;
-	}
-
-	if (skb_old)
-		dev_kfree_skb_any(skb_old);
 
 	if (!err) {
 		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);