Commit 12e6d7e6 authored by David S. Miller

Merge branch 'enetc-swtso'

Ioana Ciornei says:

====================
net: enetc: add support for software TSO

This series adds support for driver-level TSO in the enetc driver.

Ever since the ENETC MDIO erratum workaround was put in place, the Tx path
has incurred a penalty (enetc_lock_mdio/enetc_unlock_mdio) for each skb
sent out. On top of this, ENETC does not support Tx checksum offloading.
This means that software TSO helps performance simply because a single
mdio lock/unlock sequence now covers multiple transmitted packets. On the
other hand, the checksum needs to be computed in software since the
controller cannot handle it.

This is why, besides using the usual tso_build_hdr()/tso_build_data()
helpers, this implementation also has to compute the checksum, both IP
and L4, for each resulting segment.
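
As an illustration of that per-segment work, below is a minimal sketch of
how the TCP checksum of one software-TSO segment can be finished with the
kernel's generic helpers (csum_partial, csum_block_add, csum_tcpudp_magic).
The helper name and the simplifying assumptions (IPv4, TCP without options)
are illustrative only; the actual patch also handles TCP options, UDP and
IPv6:

static __sum16 sw_tso_tcp_csum(struct sk_buff *skb, char *hdr,
			       char *data, int data_len)
{
	/* Rebuilt L4 header of this segment; the checksum field must be
	 * zeroed before summing.
	 */
	struct tcphdr *tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
	int l4_len = sizeof(struct tcphdr);	/* sketch: no TCP options */
	__wsum csum;

	tcph->check = 0;

	/* Partial checksum over the L4 header, then fold in the payload. */
	csum = csum_partial(tcph, l4_len, 0);
	csum = csum_block_add(csum, csum_partial(data, data_len, 0), l4_len);

	/* Complete it with the IPv4 pseudo-header (saddr, daddr, len, proto). */
	return csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				 l4_len + data_len, IPPROTO_TCP, csum);
}

The IPv4 header checksum is recomputed separately (ip_fast_csum()) since
tso_build_hdr() increments the IP ID for every segment.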

Even with the extra checksum work, the performance improvement for a TCP
flow running on a single A72 core @1.3GHz of the LS1028A SoC (2.5Gbit/s
port) is the following:

before: 1.63 Gbits/sec
after:  2.34 Gbits/sec

Changes in v2:
 - declare NETIF_F_HW_CSUM instead of NETIF_F_IP_CSUM in 1/2
 - add support for TSO over IPv6 (NETIF_F_TSO6 and csum compute) in 2/2
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 36ee7281 fb8629e2
@@ -8,6 +8,7 @@
#include <linux/vmalloc.h>
#include <linux/ptp_classify.h>
#include <net/pkt_sched.h>
#include <net/tso.h>

static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
@@ -314,12 +315,261 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
return 0;
}
static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
struct enetc_tx_swbd *tx_swbd,
union enetc_tx_bd *txbd, int *i, int hdr_len,
int data_len)
{
union enetc_tx_bd txbd_tmp;
u8 flags = 0, e_flags = 0;
dma_addr_t addr;
enetc_clear_tx_bd(&txbd_tmp);
addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
if (skb_vlan_tag_present(skb))
flags |= ENETC_TXBD_FLAGS_EX;
txbd_tmp.addr = cpu_to_le64(addr);
txbd_tmp.buf_len = cpu_to_le16(hdr_len);
/* first BD needs frm_len and offload flags set */
txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len);
txbd_tmp.flags = flags;
/* For the TSO header we do not set the dma address since we do not
* want it unmapped when we do cleanup. We still set len so that we
* count the bytes sent.
*/
tx_swbd->len = hdr_len;
tx_swbd->do_twostep_tstamp = false;
tx_swbd->check_wb = false;
/* Actually write the header in the BD */
*txbd = txbd_tmp;
/* Add extension BD for VLAN */
if (flags & ENETC_TXBD_FLAGS_EX) {
/* Get the next BD */
enetc_bdr_idx_inc(tx_ring, i);
txbd = ENETC_TXBD(*tx_ring, *i);
tx_swbd = &tx_ring->tx_swbd[*i];
prefetchw(txbd);
/* Setup the VLAN fields */
enetc_clear_tx_bd(&txbd_tmp);
txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
txbd_tmp.ext.tpid = 0; /* < C-TAG */
e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
/* Write the BD */
txbd_tmp.ext.e_flags = e_flags;
*txbd = txbd_tmp;
}
}
static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
struct enetc_tx_swbd *tx_swbd,
union enetc_tx_bd *txbd, char *data,
int size, bool last_bd)
{
union enetc_tx_bd txbd_tmp;
dma_addr_t addr;
u8 flags = 0;
enetc_clear_tx_bd(&txbd_tmp);
addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(tx_ring->dev, addr))) {
netdev_err(tx_ring->ndev, "DMA map error\n");
return -ENOMEM;
}
if (last_bd) {
flags |= ENETC_TXBD_FLAGS_F;
tx_swbd->is_eof = 1;
}
txbd_tmp.addr = cpu_to_le64(addr);
txbd_tmp.buf_len = cpu_to_le16(size);
txbd_tmp.flags = flags;
tx_swbd->dma = addr;
tx_swbd->len = size;
tx_swbd->dir = DMA_TO_DEVICE;
*txbd = txbd_tmp;
return 0;
}
static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb,
char *hdr, int hdr_len, int *l4_hdr_len)
{
char *l4_hdr = hdr + skb_transport_offset(skb);
int mac_hdr_len = skb_network_offset(skb);
if (tso->tlen != sizeof(struct udphdr)) {
struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
tcph->check = 0;
} else {
struct udphdr *udph = (struct udphdr *)(l4_hdr);
udph->check = 0;
}
/* Compute the IP checksum. This is necessary since tso_build_hdr()
* already incremented the IP ID field.
*/
if (!tso->ipv6) {
struct iphdr *iph = (void *)(hdr + mac_hdr_len);
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
/* Compute the checksum over the L4 header. */
*l4_hdr_len = hdr_len - skb_transport_offset(skb);
return csum_partial(l4_hdr, *l4_hdr_len, 0);
}
static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso,
struct sk_buff *skb, char *hdr, int len,
__wsum sum)
{
char *l4_hdr = hdr + skb_transport_offset(skb);
__sum16 csum_final;
/* Complete the L4 checksum by appending the pseudo-header to the
* already computed checksum.
*/
if (!tso->ipv6)
csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
len, ip_hdr(skb)->protocol, sum);
else
csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
len, ipv6_hdr(skb)->nexthdr, sum);
if (tso->tlen != sizeof(struct udphdr)) {
struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
tcph->check = csum_final;
} else {
struct udphdr *udph = (struct udphdr *)(l4_hdr);
udph->check = csum_final;
}
}
static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
int hdr_len, total_len, data_len;
struct enetc_tx_swbd *tx_swbd;
union enetc_tx_bd *txbd;
struct tso_t tso;
__wsum csum, csum2;
int count = 0, pos;
int err, i, bd_data_num;
/* Initialize the TSO handler, and prepare the first payload */
hdr_len = tso_start(skb, &tso);
total_len = skb->len - hdr_len;
i = tx_ring->next_to_use;
while (total_len > 0) {
char *hdr;
/* Get the BD */
txbd = ENETC_TXBD(*tx_ring, i);
tx_swbd = &tx_ring->tx_swbd[i];
prefetchw(txbd);
/* Determine the length of this packet */
data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len);
total_len -= data_len;
/* prepare packet headers: MAC + IP + TCP */
hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;
tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0);
/* compute the csum over the L4 header */
csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
bd_data_num = 0;
count++;
while (data_len > 0) {
int size;
size = min_t(int, tso.size, data_len);
/* Advance the index in the BDR */
enetc_bdr_idx_inc(tx_ring, &i);
txbd = ENETC_TXBD(*tx_ring, i);
tx_swbd = &tx_ring->tx_swbd[i];
prefetchw(txbd);
/* Compute the checksum over this segment of data and
* add it to the csum already computed (over the L4
* header and possible other data segments).
*/
csum2 = csum_partial(tso.data, size, 0);
csum = csum_block_add(csum, csum2, pos);
pos += size;
err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
tso.data, size,
size == data_len);
if (err)
goto err_map_data;
data_len -= size;
count++;
bd_data_num++;
tso_build_data(skb, &tso, size);
if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
goto err_chained_bd;
}
enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum);
if (total_len == 0)
tx_swbd->skb = skb;
/* Go to the next BD */
enetc_bdr_idx_inc(tx_ring, &i);
}
tx_ring->next_to_use = i;
enetc_update_tx_ring_tail(tx_ring);
return count;
err_map_data:
dev_err(tx_ring->dev, "DMA map error");
err_chained_bd:
do {
tx_swbd = &tx_ring->tx_swbd[i];
enetc_free_tx_frame(tx_ring, tx_swbd);
if (i == 0)
i = tx_ring->bd_count;
i--;
} while (count--);
return 0;
}
static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
int count, err;

/* Queue one-step Sync packet if already locked */
if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
@@ -332,19 +582,35 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
tx_ring = priv->tx_ring[skb->queue_mapping];

if (skb_is_gso(skb)) {
if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_BUSY;
}

enetc_lock_mdio();
count = enetc_map_tx_tso_buffs(tx_ring, skb);
enetc_unlock_mdio();
} else {
if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
if (unlikely(skb_linearize(skb)))
goto drop_packet_err;

count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_BUSY;
}

if (skb->ip_summed == CHECKSUM_PARTIAL) {
err = skb_checksum_help(skb);
if (err)
goto drop_packet_err;
}

enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb);
enetc_unlock_mdio();
}

if (unlikely(!count))
goto drop_packet_err;
@@ -1493,15 +1759,30 @@ static int enetc_alloc_txbdr(struct enetc_bdr *txr)
return -ENOMEM;

err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
if (err)
goto err_alloc_bdr;

txr->tso_headers = dma_alloc_coherent(txr->dev,
txr->bd_count * TSO_HEADER_SIZE,
&txr->tso_headers_dma,
GFP_KERNEL);
if (!txr->tso_headers) {
err = -ENOMEM;
goto err_alloc_tso;
}

txr->next_to_clean = 0;
txr->next_to_use = 0;

return 0;

err_alloc_tso:
dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd),
txr->bd_base, txr->bd_dma_base);
txr->bd_base = NULL;
err_alloc_bdr:
vfree(txr->tx_swbd);
txr->tx_swbd = NULL;

return err;
}
static void enetc_free_txbdr(struct enetc_bdr *txr)
@@ -1513,6 +1794,10 @@ static void enetc_free_txbdr(struct enetc_bdr *txr)
size = txr->bd_count * sizeof(union enetc_tx_bd);

dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE,
txr->tso_headers, txr->tso_headers_dma);
txr->tso_headers = NULL;

dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
txr->bd_base = NULL;
...
@@ -112,6 +112,10 @@ struct enetc_bdr {
dma_addr_t bd_dma_base;
u8 tsd_enable; /* Time specific departure */
bool ext_en; /* enable h/w descriptor extensions */

/* DMA buffer for TSO headers */
char *tso_headers;
dma_addr_t tso_headers_dma;
} ____cacheline_aligned_in_smp;

static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
...
@@ -759,10 +759,14 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;

ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;

if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
...
@@ -122,10 +122,14 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;

ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;

if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
...