Commit 5b039241 authored by Shannon Nelson, committed by David S. Miller

ionic: simplify TSO descriptor mapping

One issue with the original TSO code was that it was working too
hard to deal with skb layouts that were never going to show up,
such as an skb->data that was longer than a single descriptor's
length.  The other issue was trying to arrange the fragment dma
mapping at the same time as figuring out the descriptors needed.
There was just too much going on at the same time.

Now we do the dma mapping first, which sets up the buffers with
skb->data in buf[0] and the remaining frags in buf[1..n-1].
Next we spread the bufs across the descriptors needed, where
each descriptor gets up to mss number of bytes.
Signed-off-by: Shannon Nelson <snelson@pensando.io>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 578ce046
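Before the diff, a minimal, self-contained userspace sketch of the same two-phase idea may help: phase one collects (address, length) pairs for skb->data and each fragment, and phase two walks those buffers and carves them into per-descriptor chunks of at most hdrlen + mss bytes for the first segment and mss bytes for each segment after that. Everything below (names, the example buffer sizes, the printed output) is an illustrative assumption, not code from this patch.

/* Sketch only: models the buffer-first TSO layout with plain integers
 * instead of DMA mappings.  The buf array stands in for the per-queue
 * ionic_buf_info entries filled in by the new ionic_tx_map_tso().
 */
#include <stdio.h>

struct buf { unsigned long addr; unsigned int len; };

static void layout_tso(const struct buf *bufs, int nbufs,
                       unsigned int hdrlen, unsigned int mss)
{
        unsigned int tso_rem = 0, seg_rem, frag_rem = 0, chunk;
        unsigned long frag_addr = 0;
        int frag_idx = 0, seg = 0;

        for (int i = 0; i < nbufs; i++)
                tso_rem += bufs[i].len;

        /* the first segment also carries the headers */
        seg_rem = tso_rem < hdrlen + mss ? tso_rem : hdrlen + mss;

        while (tso_rem > 0) {
                printf("descriptor %d:\n", seg++);
                while (seg_rem > 0) {
                        if (frag_rem == 0) {    /* advance to the next buffer */
                                frag_addr = bufs[frag_idx].addr;
                                frag_rem = bufs[frag_idx].len;
                                frag_idx++;
                        }
                        chunk = frag_rem < seg_rem ? frag_rem : seg_rem;
                        printf("  addr=0x%lx len=%u\n", frag_addr, chunk);
                        frag_addr += chunk;
                        frag_rem -= chunk;
                        tso_rem -= chunk;
                        seg_rem -= chunk;
                }
                seg_rem = tso_rem < mss ? tso_rem : mss;
        }
}

int main(void)
{
        /* hypothetical skb: 100-byte linear area plus two frags */
        struct buf bufs[] = { { 0x1000, 100 }, { 0x2000, 3000 }, { 0x3000, 500 } };

        layout_tso(bufs, 3, 54, 1448);
        return 0;
}

Each "descriptor" group in the sketch corresponds to one TX descriptor plus its SG elements in the driver; the real code additionally posts the descriptor via ionic_tx_tso_post() and records the mapped buffers so ionic_tx_clean() can unmap them later.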
@@ -179,8 +179,11 @@ struct ionic_buf_info {
         struct page *page;
         dma_addr_t dma_addr;
         u32 page_offset;
+        u32 len;
 };
 
+#define IONIC_MAX_FRAGS		(1 + IONIC_TX_MAX_SG_ELEMS_V1)
+
 struct ionic_desc_info {
         union {
                 void *desc;
@@ -194,7 +197,7 @@ struct ionic_desc_info {
                 struct ionic_rxq_sg_desc *rxq_sgl_desc;
         };
         unsigned int nbufs;
-        struct ionic_buf_info bufs[IONIC_RX_MAX_SG_ELEMS + 1];
+        struct ionic_buf_info bufs[IONIC_MAX_FRAGS];
         ionic_desc_cb cb;
         void *cb_arg;
 };
@@ -605,12 +605,51 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
         return dma_addr;
 }
 
+static int ionic_tx_map_tso(struct ionic_queue *q, struct sk_buff *skb,
+                            struct ionic_buf_info *buf_info)
+{
+        struct device *dev = q->dev;
+        dma_addr_t dma_addr;
+        skb_frag_t *frag;
+        int frag_idx;
+
+        dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
+        if (dma_mapping_error(dev, dma_addr))
+                return -EIO;
+        buf_info->dma_addr = dma_addr;
+        buf_info->len = skb_headlen(skb);
+        buf_info++;
+
+        for (frag_idx = 0; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, buf_info++) {
+                frag = &skb_shinfo(skb)->frags[frag_idx];
+                dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
+                if (dma_mapping_error(dev, dma_addr))
+                        goto dma_fail;
+                buf_info->dma_addr = dma_addr;
+                buf_info->len = skb_frag_size(frag);
+        }
+
+        return 0;
+
+dma_fail:
+        /* unwind the frag mappings and the head mapping */
+        while (frag_idx > 0) {
+                frag_idx--;
+                buf_info--;
+                dma_unmap_page(dev, buf_info->dma_addr,
+                               buf_info->len, DMA_TO_DEVICE);
+        }
+        dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
+        return -EIO;
+}
+
 static void ionic_tx_clean(struct ionic_queue *q,
                            struct ionic_desc_info *desc_info,
                            struct ionic_cq_info *cq_info,
                            void *cb_arg)
 {
         struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
+        struct ionic_buf_info *buf_info = desc_info->bufs;
         struct ionic_txq_sg_elem *elem = sg_desc->elems;
         struct ionic_tx_stats *stats = q_to_tx_stats(q);
         struct ionic_txq_desc *desc = desc_info->desc;
@@ -623,20 +662,22 @@ static void ionic_tx_clean(struct ionic_queue *q,
         decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
                             &opcode, &flags, &nsge, &addr);
 
-        /* use unmap_single only if either this is not TSO,
-         * or this is first descriptor of a TSO
-         */
-        if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
-            flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
+        if (opcode != IONIC_TXQ_DESC_OPCODE_TSO) {
                 dma_unmap_single(dev, (dma_addr_t)addr,
                                  le16_to_cpu(desc->len), DMA_TO_DEVICE);
-        else
-                dma_unmap_page(dev, (dma_addr_t)addr,
-                               le16_to_cpu(desc->len), DMA_TO_DEVICE);
-
-        for (i = 0; i < nsge; i++, elem++)
-                dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
-                               le16_to_cpu(elem->len), DMA_TO_DEVICE);
+                for (i = 0; i < nsge; i++, elem++)
+                        dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
+                                       le16_to_cpu(elem->len), DMA_TO_DEVICE);
+        } else {
+                if (flags & IONIC_TXQ_DESC_FLAG_TSO_EOT) {
+                        dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
+                                         buf_info->len, DMA_TO_DEVICE);
+                        buf_info++;
+                        for (i = 1; i < desc_info->nbufs; i++, buf_info++)
+                                dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
+                                               buf_info->len, DMA_TO_DEVICE);
+                }
+        }
 
         if (cb_arg) {
                 struct sk_buff *skb = cb_arg;
@@ -794,29 +835,23 @@ static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
 
 static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
 {
+        struct ionic_buf_info buf_info[IONIC_MAX_FRAGS] = {{0}};
         struct ionic_tx_stats *stats = q_to_tx_stats(q);
-        struct ionic_desc_info *rewind_desc_info;
         struct ionic_txq_sg_elem *elem;
-        struct device *dev = q->dev;
         struct ionic_txq_desc *desc;
-        unsigned int frag_left = 0;
-        unsigned int offset = 0;
-        u16 abort = q->head_idx;
-        unsigned int len_left;
+        unsigned int chunk_len;
+        unsigned int frag_rem;
+        unsigned int frag_idx;
+        unsigned int tso_rem;
+        unsigned int seg_rem;
         dma_addr_t desc_addr;
+        dma_addr_t frag_addr;
         unsigned int hdrlen;
         unsigned int nfrags;
-        unsigned int seglen;
-        u64 total_bytes = 0;
-        u64 total_pkts = 0;
-        u16 rewind = abort;
-        unsigned int left;
         unsigned int len;
         unsigned int mss;
-        skb_frag_t *frag;
         bool start, done;
         bool outer_csum;
-        dma_addr_t addr;
         bool has_vlan;
         u16 desc_len;
         u8 desc_nsge;
@@ -824,9 +859,12 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
         bool encap;
         int err;
 
+        if (unlikely(ionic_tx_map_tso(q, skb, buf_info)))
+                return -EIO;
+
+        len = skb->len;
         mss = skb_shinfo(skb)->gso_size;
         nfrags = skb_shinfo(skb)->nr_frags;
-        len_left = skb->len - skb_headlen(skb);
         outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
                      (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
         has_vlan = !!skb_vlan_tag_present(skb);
@@ -851,117 +889,68 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
         else
                 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
 
-        seglen = hdrlen + mss;
-        left = skb_headlen(skb);
+        tso_rem = len;
+        seg_rem = min(tso_rem, hdrlen + mss);
 
-        desc = ionic_tx_tso_next(q, &elem);
-        start = true;
+        frag_idx = 0;
+        frag_addr = 0;
+        frag_rem = 0;
 
-        /* Chop skb->data up into desc segments */
-        while (left > 0) {
-                len = min(seglen, left);
-                frag_left = seglen - len;
-                desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
-                if (dma_mapping_error(dev, desc_addr))
-                        goto err_out_abort;
-                desc_len = len;
+        start = true;
+
+        while (tso_rem > 0) {
+                desc = NULL;
+                elem = NULL;
+                desc_addr = 0;
+                desc_len = 0;
                 desc_nsge = 0;
-                left -= len;
-                offset += len;
-                if (nfrags > 0 && frag_left > 0)
-                        continue;
-                done = (nfrags == 0 && left == 0);
-                ionic_tx_tso_post(q, desc, skb,
-                                  desc_addr, desc_nsge, desc_len,
-                                  hdrlen, mss,
-                                  outer_csum,
-                                  vlan_tci, has_vlan,
-                                  start, done);
-                total_pkts++;
-                total_bytes += start ? len : len + hdrlen;
-                desc = ionic_tx_tso_next(q, &elem);
-                start = false;
-                seglen = mss;
-        }
-
-        /* Chop skb frags into desc segments */
-        for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
-                offset = 0;
-                left = skb_frag_size(frag);
-                len_left -= left;
-                nfrags--;
-                stats->frags++;
-
-                while (left > 0) {
-                        if (frag_left > 0) {
-                                len = min(frag_left, left);
-                                frag_left -= len;
-                                addr = ionic_tx_map_frag(q, frag, offset, len);
-                                if (dma_mapping_error(dev, addr))
-                                        goto err_out_abort;
-                                elem->addr = cpu_to_le64(addr);
-                                elem->len = cpu_to_le16(len);
-                                elem++;
-                                desc_nsge++;
-                                left -= len;
-                                offset += len;
-                                if (nfrags > 0 && frag_left > 0)
-                                        continue;
-                                done = (nfrags == 0 && left == 0);
-                                ionic_tx_tso_post(q, desc, skb, desc_addr,
-                                                  desc_nsge, desc_len,
-                                                  hdrlen, mss, outer_csum,
-                                                  vlan_tci, has_vlan,
-                                                  start, done);
-                                total_pkts++;
-                                total_bytes += start ? len : len + hdrlen;
-                                desc = ionic_tx_tso_next(q, &elem);
-                                start = false;
-                        } else {
-                                len = min(mss, left);
-                                frag_left = mss - len;
-                                desc_addr = ionic_tx_map_frag(q, frag,
-                                                              offset, len);
-                                if (dma_mapping_error(dev, desc_addr))
-                                        goto err_out_abort;
-                                desc_len = len;
-                                desc_nsge = 0;
-                                left -= len;
-                                offset += len;
-                                if (nfrags > 0 && frag_left > 0)
-                                        continue;
-                                done = (nfrags == 0 && left == 0);
-                                ionic_tx_tso_post(q, desc, skb, desc_addr,
-                                                  desc_nsge, desc_len,
-                                                  hdrlen, mss, outer_csum,
-                                                  vlan_tci, has_vlan,
-                                                  start, done);
-                                total_pkts++;
-                                total_bytes += start ? len : len + hdrlen;
-                                desc = ionic_tx_tso_next(q, &elem);
-                                start = false;
+
+                /* loop until a full tcp segment can be created */
+                while (seg_rem > 0) {
+                        /* if the fragment is exhausted get the next one */
+                        if (frag_rem == 0) {
+                                /* grab the next fragment */
+                                frag_addr = buf_info[frag_idx].dma_addr;
+                                frag_rem = buf_info[frag_idx].len;
+                                frag_idx++;
                         }
+                        chunk_len = min(frag_rem, seg_rem);
+                        if (!desc) {
+                                /* fill main descriptor */
+                                desc = ionic_tx_tso_next(q, &elem);
+                                desc_addr = frag_addr;
+                                desc_len = chunk_len;
+                        } else {
+                                /* fill sg descriptor */
+                                elem->addr = cpu_to_le64(frag_addr);
+                                elem->len = cpu_to_le16(chunk_len);
+                                elem++;
+                                desc_nsge++;
+                        }
+                        frag_addr += chunk_len;
+                        frag_rem -= chunk_len;
+                        tso_rem -= chunk_len;
+                        seg_rem -= chunk_len;
                 }
+                seg_rem = min(tso_rem, mss);
+                done = (tso_rem == 0);
+                if (done) {
+                        memcpy(&q->info[q->head_idx].bufs, buf_info, sizeof(buf_info));
+                        q->info[q->head_idx].nbufs = nfrags + 1;
+                }
+                /* post descriptor */
+                ionic_tx_tso_post(q, desc, skb,
+                                  desc_addr, desc_nsge, desc_len,
+                                  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
+                                  start, done);
+                start = false;
         }
 
-        stats->pkts += total_pkts;
-        stats->bytes += total_bytes;
+        stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
+        stats->bytes += len;
         stats->tso++;
-        stats->tso_bytes += total_bytes;
+        stats->tso_bytes = len;
 
         return 0;
-
-err_out_abort:
-        while (rewind != q->head_idx) {
-                rewind_desc_info = &q->info[rewind];
-                ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
-                rewind = (rewind + 1) & (q->num_descs - 1);
-        }
-        q->head_idx = abort;
-
-        return -ENOMEM;
 }
 
 static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)