Commit bf239741 authored by Ilya Lesokhin, committed by David S. Miller

net/mlx5e: TLS, Add Innova TLS TX offload data path

Implement the TLS tx offload data path according to the
requirements of the TLS generic NIC offload infrastructure.

Special metadata ethertype is used to pass information to
the hardware.
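
Concretely, the driver pushes an 8-byte pseudo-header between the MAC
addresses and the packet's original ethertype. A minimal sketch of the
resulting frame start, mirroring struct mlx5e_tls_metadata and
mlx5e_tls_add_metadata() added below in en_accel/tls_rxtx.c (the wrapper
struct name here is only illustrative; the numeric value of
MLX5E_METADATA_ETHER_TYPE is defined elsewhere in the driver):

#include <linux/types.h>
#include <linux/if_ether.h>

/* Illustrative only: the patch itself defines just the trailing
 * 8 bytes as struct mlx5e_tls_metadata.
 */
struct tls_offload_frame_start {
	unsigned char dmac[ETH_ALEN];
	unsigned char smac[ETH_ALEN];
	__be16 metadata_ethertype;	/* MLX5E_METADATA_ETHER_TYPE */
	__be32 syndrome_swid;		/* 1 byte syndrome, 3 bytes SWID */
	__be16 first_seq;		/* low 16 bits of the TCP sequence */
	__be16 ethertype;		/* the packet's original ethertype */
} __packed;

On the regular transmit path only syndrome_swid is filled in
(SYNDROME_OFFLOAD_REQUIRED plus the connection's SWID); the resync path
rewrites the syndrome to SYNDROME_SYNC and fills first_seq, see
mlx5e_tls_complete_sync_skb() below.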
Signed-off-by: Ilya Lesokhin <ilyal@mellanox.com>
Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c83294b9
@@ -28,6 +28,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
		en_accel/ipsec_stats.o
-mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o
CFLAGS_tracepoint.o := -I$(src)
@@ -335,6 +335,7 @@ enum {
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
};

struct mlx5e_sq_wqe_info {
@@ -830,6 +831,8 @@ void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi);
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
@@ -945,6 +948,18 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}
static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
{
struct mlx5_wq_cyc *wq;
wq = &sq->wq;
*pi = sq->pc & wq->sz_m1;
*wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
memset(*wqe, 0, sizeof(**wqe));
}
static inline
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
...
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5E_EN_ACCEL_H__
#define __MLX5E_EN_ACCEL_H__
#ifdef CONFIG_MLX5_ACCEL
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "en.h"
static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
struct mlx5e_txqsq *sq,
struct net_device *dev,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
{
#ifdef CONFIG_MLX5_EN_TLS
if (sq->state & BIT(MLX5E_SQ_STATE_TLS)) {
skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
if (unlikely(!skb))
return NULL;
}
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
if (unlikely(!skb))
return NULL;
}
#endif
return skb;
}
#endif /* CONFIG_MLX5_ACCEL */
#endif /* __MLX5E_EN_ACCEL_H__ */
@@ -169,5 +169,7 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
if (!mlx5_accel_is_tls_device(priv->mdev))
return;
netdev->features |= NETIF_F_HW_TLS_TX;
netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->tlsdev_ops = &mlx5e_tls_ops;
}
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"
#define SYNDROME_OFFLOAD_REQUIRED 32
#define SYNDROME_SYNC 33
struct sync_info {
u64 rcd_sn;
s32 sync_len;
int nr_frags;
skb_frag_t frags[MAX_SKB_FRAGS];
};
struct mlx5e_tls_metadata {
/* One byte of syndrome followed by 3 bytes of swid */
__be32 syndrome_swid;
__be16 first_seq;
/* packet type ID field */
__be16 ethertype;
} __packed;
static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
{
struct mlx5e_tls_metadata *pet;
struct ethhdr *eth;
if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
return -ENOMEM;
eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
pet = (struct mlx5e_tls_metadata *)(eth + 1);
memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
2 * ETH_ALEN);
eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
return 0;
}
static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context,
u32 tcp_seq, struct sync_info *info)
{
int remaining, i = 0, ret = -EINVAL;
struct tls_record_info *record;
unsigned long flags;
s32 sync_size;
spin_lock_irqsave(&context->base.lock, flags);
record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);
if (unlikely(!record))
goto out;
sync_size = tcp_seq - tls_record_start_seq(record);
info->sync_len = sync_size;
if (unlikely(sync_size < 0)) {
if (tls_record_is_start_marker(record))
goto done;
goto out;
}
remaining = sync_size;
while (remaining > 0) {
info->frags[i] = record->frags[i];
__skb_frag_ref(&info->frags[i]);
remaining -= skb_frag_size(&info->frags[i]);
if (remaining < 0)
skb_frag_size_add(&info->frags[i], remaining);
i++;
}
info->nr_frags = i;
done:
ret = 0;
out:
spin_unlock_irqrestore(&context->base.lock, flags);
return ret;
}
static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
struct sk_buff *nskb, u32 tcp_seq,
int headln, __be64 rcd_sn)
{
struct mlx5e_tls_metadata *pet;
u8 syndrome = SYNDROME_SYNC;
struct iphdr *iph;
struct tcphdr *th;
int data_len, mss;
nskb->dev = skb->dev;
skb_reset_mac_header(nskb);
skb_set_network_header(nskb, skb_network_offset(skb));
skb_set_transport_header(nskb, skb_transport_offset(skb));
memcpy(nskb->data, skb->data, headln);
memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));
iph = ip_hdr(nskb);
iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
th = tcp_hdr(nskb);
data_len = nskb->len - headln;
tcp_seq -= data_len;
th->seq = htonl(tcp_seq);
mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
skb_shinfo(nskb)->gso_size = 0;
if (data_len > mss) {
skb_shinfo(nskb)->gso_size = mss;
skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
}
skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
memcpy(pet, &syndrome, sizeof(syndrome));
pet->first_seq = htons(tcp_seq);
/* MLX5 devices don't care about the checksum partial start, offset
* and pseudo header
*/
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->xmit_more = 1;
nskb->queue_mapping = skb->queue_mapping;
}
static struct sk_buff *
mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
{
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
struct sync_info info;
struct sk_buff *nskb;
int linear_len = 0;
int headln;
int i;
sq->stats.tls_ooo++;
if (mlx5e_tls_get_sync_data(context, tcp_seq, &info))
/* We might get here if a retransmission reaches the driver
* after the relevant record is acked.
* It should be safe to drop the packet in this case
*/
goto err_out;
if (unlikely(info.sync_len < 0)) {
u32 payload;
headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
payload = skb->len - headln;
if (likely(payload <= -info.sync_len))
/* SKB payload doesn't require offload
*/
return skb;
netdev_err(skb->dev,
"Can't offload from the middle of an SKB [seq: %X, offload_seq: %X, end_seq: %X]\n",
tcp_seq, tcp_seq + payload + info.sync_len,
tcp_seq + payload);
goto err_out;
}
if (unlikely(mlx5e_tls_add_metadata(skb, context->swid)))
goto err_out;
headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
linear_len += headln + sizeof(info.rcd_sn);
nskb = alloc_skb(linear_len, GFP_ATOMIC);
if (unlikely(!nskb))
goto err_out;
context->expected_seq = tcp_seq + skb->len - headln;
skb_put(nskb, linear_len);
for (i = 0; i < info.nr_frags; i++)
skb_shinfo(nskb)->frags[i] = info.frags[i];
skb_shinfo(nskb)->nr_frags = info.nr_frags;
nskb->data_len = info.sync_len;
nskb->len += info.sync_len;
sq->stats.tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
mlx5e_sq_fetch_wqe(sq, wqe, pi);
return skb;
err_out:
dev_kfree_skb_any(skb);
return NULL;
}
struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_txqsq *sq,
struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
{
struct mlx5e_tls_offload_context *context;
struct tls_context *tls_ctx;
u32 expected_seq;
int datalen;
u32 skb_seq;
if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
goto out;
datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (!datalen)
goto out;
tls_ctx = tls_get_ctx(skb->sk);
if (unlikely(tls_ctx->netdev != netdev))
goto out;
skb_seq = ntohl(tcp_hdr(skb)->seq);
context = mlx5e_get_tls_tx_context(tls_ctx);
expected_seq = context->expected_seq;
if (unlikely(expected_seq != skb_seq)) {
skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi);
goto out;
}
if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
dev_kfree_skb_any(skb);
skb = NULL;
goto out;
}
context->expected_seq = skb_seq + datalen;
out:
return skb;
}
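
To make the resync arithmetic above easier to follow, here is a small
standalone user-space sketch (example numbers only, not driver code) of the
sizes mlx5e_tls_handle_ooo() and mlx5e_tls_complete_sync_skb() end up with
when a retransmission starts in the middle of a TLS record:

#include <stdio.h>

int main(void)
{
	/* assumed example inputs */
	unsigned int record_start = 1000;	/* tls_record_start_seq(record) */
	unsigned int skb_seq = 5000;		/* TCP seq of the retransmitted skb */
	unsigned int headln = 54;		/* ETH(14) + IPv4(20) + TCP(20) */
	unsigned int net_off = 14;		/* skb_network_offset() */
	unsigned int mtu = 1500;

	unsigned int sync_len = skb_seq - record_start;	/* record bytes replayed as frags */
	unsigned int linear = headln + 8;	/* copied headers + 64-bit record number */
	unsigned int nskb_len = linear + sync_len;
	unsigned int data_len = nskb_len - headln;
	unsigned int mss = mtu - (headln - net_off);	/* 1460 in this example */

	printf("resync skb: len=%u, tcp seq=%u\n", nskb_len, skb_seq - data_len);
	if (data_len > mss)	/* the driver then marks the resync skb as GSO */
		printf("gso_size=%u, gso_segs=%u\n", mss, (data_len + mss - 1) / mss);
	return 0;
}

The original skb is then transmitted unchanged right after the resync skb,
and context->expected_seq is advanced past its payload.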
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5E_TLS_RXTX_H__
#define __MLX5E_TLS_RXTX_H__
#ifdef CONFIG_MLX5_EN_TLS
#include <linux/skbuff.h>
#include "en.h"
struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
struct mlx5e_txqsq *sq,
struct sk_buff *skb,
struct mlx5e_tx_wqe **wqe,
u16 *pi);
#endif /* CONFIG_MLX5_EN_TLS */
#endif /* __MLX5E_TLS_RXTX_H__ */
@@ -1016,6 +1016,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (mlx5_accel_is_tls_device(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
...
@@ -43,6 +43,12 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
@@ -161,6 +167,10 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
s->tx_csum_none += sq_stats->csum_none;
s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
s->tx_tls_ooo += sq_stats->tls_ooo;
s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
#endif
}
}
...
@@ -93,6 +93,11 @@ struct mlx5e_sw_stats {
u64 rx_cache_waive;
u64 ch_eq_rearm;
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_ooo;
u64 tx_tls_resync_bytes;
#endif
/* Special handling counters */
u64 link_down_events_phy;
};
@@ -194,6 +199,10 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_ooo;
u64 tls_resync_bytes;
#endif
/* less likely accessed in data path */
u64 csum_none;
u64 stopped;
...
@@ -35,12 +35,21 @@
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
-#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/en_accel.h"
#include "lib/clock.h"

#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
MLX5E_SQ_NOPS_ROOM)
#else
/* TLS offload requires MLX5E_SQ_STOP_ROOM to have
* enough room for a resync SKB, a normal SKB and a NOP
*/
#define MLX5E_SQ_STOP_ROOM (2 * MLX5_SEND_WQE_MAX_WQEBBS +\
MLX5E_SQ_NOPS_ROOM)
#endif
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
struct mlx5e_sq_dma *dma)
@@ -325,8 +334,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
}

-static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi)
{
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
@@ -399,21 +408,19 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
-struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
-struct mlx5_wq_cyc *wq = &sq->wq;
-u16 pi = sq->pc & wq->sz_m1;
-struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
-memset(wqe, 0, sizeof(*wqe));
+struct mlx5e_tx_wqe *wqe;
+struct mlx5e_txqsq *sq;
+u16 pi;
+
+sq = priv->txq2sq[skb_get_queue_mapping(skb)];
+mlx5e_sq_fetch_wqe(sq, &wqe, &pi);

-#ifdef CONFIG_MLX5_EN_IPSEC
-if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
-skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
-if (unlikely(!skb))
-return NETDEV_TX_OK;
-}
+#ifdef CONFIG_MLX5_ACCEL
+/* might send skbs and update wqe and pi */
+skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
+if (unlikely(!skb))
+return NETDEV_TX_OK;
#endif

return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
...