Commit 091189ab authored by Jakub Kicinski

Merge branch 'cxgb4-add-udp-segmentation-offload-support'

Rahul Lakkireddy says:

====================
This series of patches adds UDP Segmentation Offload (USO) support
for Chelsio T5/T6 NICs.

Patch 1 updates the current Scatter Gather List (SGL) DMA unmap logic
for USO requests.

Patch 2 adds USO support for the NIC and MQPRIO QoS offload Tx paths.

Patch 3 adds missing stats for the MQPRIO QoS offload Tx path.
====================
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
parents a9f852e9 8311f0be
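For context on what the series plugs into: USO rides on the kernel's generic GSO machinery. A driver advertises NETIF_F_GSO_UDP_L4, and the stack then hands it oversized UDP skbs marked SKB_GSO_UDP_L4, with the per-datagram payload size in gso_size. A minimal sketch of those driver-facing pieces (the function names below are illustrative, not taken from this series):

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical probe-time snippet: advertise UDP segmentation offload so
 * the stack may pass GSO UDP skbs to ndo_start_xmit().
 */
static void example_advertise_uso(struct net_device *netdev)
{
	netdev->hw_features |= NETIF_F_GSO_UDP_L4;
	netdev->features    |= NETIF_F_GSO_UDP_L4;
}

/* Hypothetical xmit-time check: a USO request is a GSO skb whose gso_type
 * carries SKB_GSO_UDP_L4; gso_size is the UDP payload size the hardware
 * should cut each datagram to.
 */
static bool example_is_uso(const struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
}
```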
@@ -673,16 +673,16 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xfrm_state *x = xfrm_input_state(skb);
unsigned int last_desc, ndesc, flits = 0;
struct ipsec_sa_entry *sa_entry;
u64 *pos, *end, *before, *sgl;
struct tx_sw_desc *sgl_sdesc;
int qidx, left, credits;
unsigned int flits = 0, ndesc;
struct adapter *adap;
bool immediate = false;
struct sge_eth_txq *q;
struct adapter *adap;
struct port_info *pi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
struct sec_path *sp;
bool immediate = false;
if (!x->xso.offload_handle)
return NETDEV_TX_BUSY;
@@ -715,8 +715,14 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
last_desc = q->q.pidx + ndesc - 1;
if (last_desc >= q->q.size)
last_desc -= q->q.size;
sgl_sdesc = &q->q.sdesc[last_desc];
if (!immediate &&
unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
q->mapping_err++;
goto out_free;
}
@@ -742,17 +748,10 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
} else {
int last_desc;
cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
0, addr);
0, sgl_sdesc->addr);
skb_orphan(skb);
last_desc = q->q.pidx + ndesc - 1;
if (last_desc >= q->q.size)
last_desc -= q->q.size;
q->q.sdesc[last_desc].skb = skb;
q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
sgl_sdesc->skb = skb;
}
txq_advance(&q->q, ndesc);
......
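The chcr_ipsec hunks above stop passing a stack-local address array to cxgb4_write_sgl() and instead record the DMA addresses in the software descriptor (sgl_sdesc->addr) that the Tx completion for this request will later reference. A hedged sketch of the unmap pattern this enables, assuming the skb head was mapped with dma_map_single() and the page fragments with skb_frag_dma_map(); the driver's actual unmap helper lives in the collapsed portion of the diff and the function name here is illustrative:

```c
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include "cxgb4.h"	/* struct tx_sw_desc with skb + addr[], as above */

/* Illustrative only: unmap one completed Tx SW descriptor using the
 * addresses stashed at map time, without re-walking the hardware SGL.
 */
static void example_unmap_tx_desc(struct device *dev,
				  const struct tx_sw_desc *d)
{
	const struct skb_shared_info *si = skb_shinfo(d->skb);
	unsigned int frag;

	/* Linear part of the skb was mapped with dma_map_single(). */
	dma_unmap_single(dev, d->addr[0], skb_headlen(d->skb),
			 DMA_TO_DEVICE);

	/* Each page fragment was mapped with skb_frag_dma_map(). */
	for (frag = 0; frag < si->nr_frags; frag++)
		dma_unmap_page(dev, d->addr[frag + 1],
			       skb_frag_size(&si->frags[frag]),
			       DMA_TO_DEVICE);
}
```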
@@ -735,7 +735,12 @@ struct tx_desc {
__be64 flit[8];
};
struct tx_sw_desc;
struct ulptx_sgl;
struct tx_sw_desc {
struct sk_buff *skb; /* SKB to free after getting completion */
dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */
};
struct sge_txq {
unsigned int in_use; /* # of in-use Tx descriptors */
@@ -767,6 +772,7 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
u8 dbqt; /* SGE Doorbell Queue Timer in use */
unsigned int dbqtimerix; /* SGE Doorbell Queue Timer Index */
unsigned long tso; /* # of TSO requests */
unsigned long uso; /* # of USO requests */
unsigned long tx_cso; /* # of Tx checksum offloads */
unsigned long vlan_ins; /* # of Tx VLAN insertions */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
@@ -814,15 +820,10 @@ enum sge_eosw_state {
CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */
};
struct sge_eosw_desc {
struct sk_buff *skb; /* SKB to free after getting completion */
dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */
};
struct sge_eosw_txq {
spinlock_t lock; /* Per queue lock to synchronize completions */
enum sge_eosw_state state; /* Current ETHOFLD State */
struct sge_eosw_desc *desc; /* Descriptor ring to hold packets */
struct tx_sw_desc *desc; /* Descriptor ring to hold packets */
u32 ndesc; /* Number of descriptors */
u32 pidx; /* Current Producer Index */
u32 last_pidx; /* Last successfully transmitted Producer Index */
@@ -849,6 +850,7 @@ struct sge_eohw_txq {
struct sge_txq q; /* HW Txq */
struct adapter *adap; /* Backpointer to adapter */
unsigned long tso; /* # of TSO requests */
unsigned long uso; /* # of USO requests */
unsigned long tx_cso; /* # of Tx checksum offloads */
unsigned long vlan_ins; /* # of Tx VLAN insertions */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
@@ -1151,11 +1153,6 @@ enum {
SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */
};
struct tx_sw_desc { /* SW state per Tx descriptor */
struct sk_buff *skb;
struct ulptx_sgl *sgl;
};
/* Support for "sched_queue" command to allow one or more NIC TX Queues
* to be bound to a TX Scheduling Class.
*/
......
@@ -2748,6 +2748,7 @@ do { \
RL("RxDrops:", stats.rx_drops);
RL("RxBadPkts:", stats.bad_rx_pkts);
TL("TSO:", tso);
TL("USO:", uso);
TL("TxCSO:", tx_cso);
TL("VLANins:", vlan_ins);
TL("TxQFull:", q.stops);
@@ -2796,6 +2797,7 @@ do { \
RL("RxAN", stats.an);
RL("RxNoMem", stats.nomem);
TL("TSO:", tso);
TL("USO:", uso);
TL("TxCSO:", tx_cso);
TL("VLANins:", vlan_ins);
TL("TxQFull:", q.stops);
......
@@ -91,6 +91,7 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
"rx_bg3_frames_trunc ",
"tso ",
"uso ",
"tx_csum_offload ",
"rx_csum_good ",
"vlan_extractions ",
@@ -220,6 +221,7 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
*/
struct queue_port_stats {
u64 tso;
u64 uso;
u64 tx_csum;
u64 rx_csum;
u64 vlan_ex;
@@ -240,13 +242,15 @@ static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
{
int i;
const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
struct sge_eohw_txq *eohw_tx;
unsigned int i;
memset(s, 0, sizeof(*s));
for (i = 0; i < p->nqsets; i++, rx++, tx++) {
s->tso += tx->tso;
s->uso += tx->uso;
s->tx_csum += tx->tx_cso;
s->rx_csum += rx->stats.rx_cso;
s->vlan_ex += rx->stats.vlan_ex;
@@ -254,6 +258,16 @@ static void collect_sge_port_stats(const struct adapter *adap,
s->gro_pkts += rx->stats.lro_pkts;
s->gro_merged += rx->stats.lro_merged;
}
if (adap->sge.eohw_txq) {
eohw_tx = &adap->sge.eohw_txq[p->first_qset];
for (i = 0; i < p->nqsets; i++, eohw_tx++) {
s->tso += eohw_tx->tso;
s->uso += eohw_tx->uso;
s->tx_csum += eohw_tx->tx_cso;
s->vlan_ins += eohw_tx->vlan_ins;
}
}
}
static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
......
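The new uso counters aggregated above are plain per-Tx-queue counters; the Tx path is expected to bump tso or uso depending on the GSO flavour of the skb, roughly as in this hypothetical increment site (the real one is in the collapsed portion of the diff):

```c
#include <linux/skbuff.h>
#include "cxgb4.h"	/* struct sge_eth_txq with tso/uso counters, as above */

/* Hypothetical counter update in the Tx work-request path. */
static void example_count_gso(struct sge_eth_txq *txq, const struct sk_buff *skb)
{
	if (!skb_is_gso(skb))
		return;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		txq->uso++;	/* counted as a USO request */
	else
		txq->tso++;	/* TCP segmentation as before */
}
```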
@@ -1136,11 +1136,17 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
if (dev->num_tc) {
struct port_info *pi = netdev2pinfo(dev);
u8 ver, proto;
ver = ip_hdr(skb)->version;
proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
ip_hdr(skb)->protocol;
/* Send unsupported traffic pattern to normal NIC queues. */
txq = netdev_pick_tx(dev, skb, sb_dev);
if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
ip_hdr(skb)->protocol != IPPROTO_TCP)
skb->encapsulation ||
(proto != IPPROTO_TCP && proto != IPPROTO_UDP))
txq = txq % pi->nqsets;
return txq;
@@ -5838,7 +5844,8 @@ static void free_some_resources(struct adapter *adapter)
t4_fw_bye(adapter, adapter->pf);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
NETIF_F_GSO_UDP_L4)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
......
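With NETIF_F_GSO_UDP_L4 folded into TSO_FLAGS, the Tx path has to treat the two GSO flavours differently when sizing the headers a work request must replicate in front of every segment: TSO copies headers through the TCP header (including options), while USO copies through the fixed-size UDP header. A sketch of that computation, with an illustrative helper name (cxgb4's own version is in the collapsed portion of the diff):

```c
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>

/* Illustrative helper: length of the headers the hardware must prepend
 * to every generated segment of a GSO skb.
 */
static unsigned int example_gso_hdr_len(const struct sk_buff *skb)
{
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		return skb_transport_offset(skb) + sizeof(struct udphdr);

	/* TSO: up to and including the TCP header and its options. */
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}
```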
@@ -70,7 +70,7 @@ static int cxgb4_init_eosw_txq(struct net_device *dev,
u32 eotid, u32 hwqid)
{
struct adapter *adap = netdev2adap(dev);
struct sge_eosw_desc *ring;
struct tx_sw_desc *ring;
memset(eosw_txq, 0, sizeof(*eosw_txq));
......
This diff is collapsed.
@@ -536,7 +536,8 @@ struct fw_eth_tx_pkt_wr {
};
enum fw_eth_tx_eo_type {
FW_ETH_TX_EO_TYPE_TCPSEG = 1,
FW_ETH_TX_EO_TYPE_UDPSEG = 0,
FW_ETH_TX_EO_TYPE_TCPSEG,
};
struct fw_eth_tx_eo_wr {
@@ -544,6 +545,17 @@ struct fw_eth_tx_eo_wr {
__be32 equiq_to_len16;
__be64 r3;
union fw_eth_tx_eo {
struct fw_eth_tx_eo_udpseg {
__u8 type;
__u8 ethlen;
__be16 iplen;
__u8 udplen;
__u8 rtplen;
__be16 r4;
__be16 mss;
__be16 schedpktsize;
__be32 plen;
} udpseg;
struct fw_eth_tx_eo_tcpseg {
__u8 type;
__u8 ethlen;
......
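The new fw_eth_tx_eo_udpseg descriptor mirrors the existing tcpseg layout. Below is a hedged sketch of how it might be filled from a USO skb, assuming the union member in fw_eth_tx_eo_wr is named u (as the existing tcpseg users suggest) and inferring the field semantics from their names; the driver's real fill routine is part of the collapsed portion of the diff:

```c
#include <linux/skbuff.h>
#include <linux/udp.h>
#include "t4fw_api.h"	/* struct fw_eth_tx_eo_wr, as above */

/* Illustrative fill of the UDP-segmentation work request fields.
 * hdr_len is assumed to be the Ethernet + IP + UDP header length.
 */
static void example_fill_udpseg(struct fw_eth_tx_eo_wr *wr,
				struct sk_buff *skb, u32 hdr_len)
{
	wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
	wr->u.udpseg.ethlen = skb_network_offset(skb);
	wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
	wr->u.udpseg.udplen = sizeof(struct udphdr);
	wr->u.udpseg.rtplen = 0;
	wr->u.udpseg.r4 = 0;
	/* Per-segment payload size; fall back to the full payload length
	 * when the skb is not GSO.
	 */
	wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size ?:
				       skb->len - hdr_len);
	wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
	wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);
}
```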