Commit 70da6824 authored by Shannon Nelson, committed by Jeff Kirsher

ixgbe: enable TSO with IPsec offload

Fix things up to support TSO offload in conjunction
with IPsec hw offload.  This raises throughput with
IPsec offload enabled to nearly line rate.
Signed-off-by: Shannon Nelson <shannon.nelson@oracle.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 1db685e6
@@ -929,8 +929,13 @@ void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
 	ixgbe_ipsec_clear_hw_tables(adapter);
 
 	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
-	adapter->netdev->features |= NETIF_F_HW_ESP;
-	adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+#define IXGBE_ESP_FEATURES	(NETIF_F_HW_ESP | \
+				 NETIF_F_HW_ESP_TX_CSUM | \
+				 NETIF_F_GSO_ESP)
+
+	adapter->netdev->features |= IXGBE_ESP_FEATURES;
+	adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES;
 
 	return;
...
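The three bits bundled into IXGBE_ESP_FEATURES are the kernel's standard ESP offload feature flags: NETIF_F_HW_ESP advertises hardware ESP encryption/decryption, NETIF_F_HW_ESP_TX_CSUM advertises checksumming of ESP frames on transmit, and NETIF_F_GSO_ESP is the bit that lets the stack hand the driver TSO-sized ESP skbs in the first place. A compilable userspace restatement of the macro, with placeholder bit values rather than the kernel's real ones from include/linux/netdev_features.h:

#include <stdio.h>

/* Placeholder values for illustration; the kernel defines these
 * flags in include/linux/netdev_features.h.
 */
#define NETIF_F_HW_ESP		(1ULL << 0)	/* hw ESP encrypt/decrypt */
#define NETIF_F_HW_ESP_TX_CSUM	(1ULL << 1)	/* hw csums ESP tx frames */
#define NETIF_F_GSO_ESP		(1ULL << 2)	/* hw can segment ESP (TSO) */

#define IXGBE_ESP_FEATURES	(NETIF_F_HW_ESP | \
				 NETIF_F_HW_ESP_TX_CSUM | \
				 NETIF_F_GSO_ESP)

int main(void)
{
	unsigned long long features = 0;

	features |= IXGBE_ESP_FEATURES;	/* same pattern as the hunk above */
	printf("features = %#llx\n", features);
	return 0;
}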
@@ -7730,7 +7730,8 @@ static void ixgbe_service_task(struct work_struct *work)
 
 static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 		     struct ixgbe_tx_buffer *first,
-		     u8 *hdr_len)
+		     u8 *hdr_len,
+		     struct ixgbe_ipsec_tx_data *itd)
 {
 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
 	struct sk_buff *skb = first->skb;
@@ -7744,6 +7745,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 		unsigned char *hdr;
 	} l4;
 	u32 paylen, l4_offset;
+	u32 fceof_saidx = 0;
 	int err;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7769,13 +7771,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	if (ip.v4->version == 4) {
 		unsigned char *csum_start = skb_checksum_start(skb);
 		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+		int len = csum_start - trans_start;
 
 		/* IP header will have to cancel out any data that
-		 * is not a part of the outer IP header
+		 * is not a part of the outer IP header, so set to
+		 * a reverse csum if needed, else init check to 0.
 		 */
-		ip.v4->check = csum_fold(csum_partial(trans_start,
-						      csum_start - trans_start,
-						      0));
+		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
+					csum_fold(csum_partial(trans_start,
+							       len, 0)) : 0;
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 
 		ip.v4->tot_len = 0;
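Why seeding ip.v4->check this way works: the IPv4 header checksum is a 16-bit one's-complement sum, so pre-loading the check field with the folded sum of the bytes between the end of the outer IP header and the checksum start makes those bytes cancel when the full range is summed later. A small userspace demo of the cancellation, using simplified stand-ins for the kernel's csum_partial()/csum_fold() (the endianness details of the real helpers are ignored):

#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement sum, a simplified csum_partial() */
static uint32_t csum_partial(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* fold to 16 bits and complement, a simplified csum_fold() */
static uint16_t csum_fold(uint32_t sum)
{
	return (uint16_t)~sum;
}

int main(void)
{
	/* stand-in for the bytes between trans_start and csum_start */
	uint8_t extra[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	/* the seed the driver stores in ip->check */
	uint16_t seed = csum_fold(csum_partial(extra, sizeof(extra), 0));

	/* summing the same bytes again on top of the seed folds to zero:
	 * the "extra" data has been cancelled out */
	printf("0x%04x\n", csum_fold(csum_partial(extra, sizeof(extra), seed)));
	return 0;
}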
@@ -7806,12 +7810,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 
+	fceof_saidx |= itd->sa_idx;
+	type_tucmd |= itd->flags | itd->trailer_len;
+
 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
 	vlan_macip_lens = l4.hdr - ip.hdr;
 	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
-			  mss_l4len_idx);
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
+			  mss_l4len_idx);
 
 	return 1;
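The itd fields consumed here are filled in by ixgbe_ipsec_tx() before ixgbe_tso() runs. For reference, the structure they come from, as the driver defines it in ixgbe_ipsec.h around this commit (the field comments below are annotations added here, not the driver's; check the header for the authoritative layout):

struct ixgbe_ipsec_tx_data {
	u32 flags;		/* ADVTXD tucmd bits (e.g. ESP type and
				 * encrypt-enable), ORed into type_tucmd */
	u16 trailer_len;	/* ESP trailer length, also ORed into
				 * type_tucmd above */
	u16 sa_idx;		/* SA table index; lands in the context
				 * descriptor field shared with FCoE EOF,
				 * hence the name fceof_saidx */
};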
@@ -8502,7 +8509,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
 		goto out_drop;
 #endif
-	tso = ixgbe_tso(tx_ring, first, &hdr_len);
+	tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
 	if (tso < 0)
 		goto out_drop;
 	else if (!tso)
@@ -9911,9 +9918,15 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 
 	/* We can only support IPV4 TSO in tunnels if we can mangle the
 	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 * IPsec offload sets skb->encapsulation but still can handle
+	 * the TSO, so it's the exception.
 	 */
-	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
-		features &= ~NETIF_F_TSO;
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
+#ifdef CONFIG_XFRM
+		if (!skb->sp)
+#endif
+			features &= ~NETIF_F_TSO;
+	}
 
 	return features;
 }
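The net effect of the new branch: in a CONFIG_XFRM kernel, an encapsulated skb that cannot use MANGLEID keeps TSO only if it carries an IPsec secpath. A standalone userspace restatement of that decision (sp stands in for a non-NULL skb->sp):

#include <stdbool.h>
#include <stdio.h>

/* mirrors the CONFIG_XFRM build of the branch above */
static bool keeps_tso(bool encapsulation, bool mangleid, bool sp)
{
	if (encapsulation && !mangleid)
		return sp;	/* IPsec offload is the lone exception */
	return true;
}

int main(void)
{
	/* tunnel without MANGLEID: TSO stripped... */
	printf("%d\n", keeps_tso(true, false, false));	/* 0 */
	/* ...unless the skb is on the IPsec offload path */
	printf("%d\n", keeps_tso(true, false, true));	/* 1 */
	return 0;
}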
...