Commit 6c3f5aef authored by Ivan Vecera, committed by David S. Miller

bna: fix Rx data corruption with VLAN stripping enabled and MTU > 4096

The multi-buffer Rx mode implemented in the past introduced
a regression that causes data corruption for received VLAN
traffic when VLAN tag stripping is enabled. This mode is supported
only by newer chipsets (1860) and is enabled when MTU > 4096.

When this mode is enabled, the Rx queue contains buffers with a fixed
size of 2048 bytes. Any incoming packet larger than 2048 bytes is
divided into multiple buffers that are attached as skb frags in the
polling routine.
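
For illustration only, a minimal userspace sketch of that split; the
2048-byte buffer size follows the description above, and the helper
name is hypothetical rather than the driver's:

    #include <stdio.h>

    #define RX_BUF_SIZE 2048    /* fixed multi-buffer Rx buffer size */

    /* How many Rx buffers a frame of 'frame_len' bytes occupies. */
    static unsigned int buffers_needed(unsigned int frame_len)
    {
            return (frame_len + RX_BUF_SIZE - 1) / RX_BUF_SIZE;
    }

    int main(void)
    {
            /* e.g. a 5000-byte frame spans 3 buffers: 2048 + 2048 + 904 */
            printf("5000-byte frame -> %u buffers\n", buffers_needed(5000));
            return 0;
    }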

The driver assumes that all buffers associated with a packet, except
the last one, are fully used (e.g. a packet of size 5000 is divided
into 3 buffers of 2048 + 2048 + 904 bytes) and ignores the true size
reported in the completions. This assumption usually holds, but not
when a VLAN packet is received and VLAN tag stripping is enabled. In
this case the first buffer is only 2044 bytes long, but as the driver
always assumes 2048 bytes, 4 extra random bytes are included between
the first and the second frag. Additionally, the driver marks the
checksum as correct, so the corrupted packet is processed by the core
as if it were valid.

The driver needs to use the per-buffer used length reported by the FW
in each completion instead of blindly assuming the fixed buffer size.
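
As a rough userspace model of the two accounting schemes described
above (the assumed fixed buffer sizes versus the per-buffer lengths
reported in the completions); the values mirror the VLAN example and
the variable names are made up for illustration:

    #include <stdio.h>

    #define RX_BUF_SIZE 2048

    /*
     * Per-buffer lengths as the FW would report them for a 5000-byte
     * frame whose 4-byte VLAN tag was stripped from the first buffer:
     * 2044 + 2048 + 904 = 4996 bytes of real data.
     */
    static const unsigned int reported_len[] = { 2044, 2048, 904 };

    int main(void)
    {
            unsigned int nvecs = sizeof(reported_len) / sizeof(reported_len[0]);
            unsigned int assumed = 0, actual = 0;

            for (unsigned int i = 0; i < nvecs; i++) {
                    /* old scheme: every buffer but the last assumed full */
                    assumed += (i == nvecs - 1) ? reported_len[i] : RX_BUF_SIZE;
                    /* fixed scheme: trust the length from each completion */
                    actual += reported_len[i];
            }

            printf("assumed %u bytes, reported %u bytes, frag 0 off by %u\n",
                   assumed, actual, RX_BUF_SIZE - reported_len[0]);
            /* the 4-byte gap is where stale bytes leak between frags */
            return 0;
    }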

Cc: Rasesh Mody <rasesh.mody@qlogic.com>
Signed-off-by: Ivan Vecera <ivecera@redhat.com>
Reviewed-by: Rasesh Mody <rasesh.mody@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4e5448a3
@@ -542,22 +542,31 @@ bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
 }
 
 static void
-bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
-                        u32 sop_ci, u32 nvecs, u32 last_fraglen)
+bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
 {
+        struct bna_rcb *rcb;
         struct bnad *bnad;
-        u32 ci, vec, len, totlen = 0;
         struct bnad_rx_unmap_q *unmap_q;
-        struct bnad_rx_unmap *unmap;
+        struct bna_cq_entry *cq, *cmpl;
+        u32 ci, pi, totlen = 0;
+
+        cq = ccb->sw_q;
+        pi = ccb->producer_index;
+        cmpl = &cq[pi];
 
+        rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
         unmap_q = rcb->unmap_q;
         bnad = rcb->bnad;
+        ci = rcb->consumer_index;
 
         /* prefetch header */
-        prefetch(page_address(unmap_q->unmap[sop_ci].page) +
-                 unmap_q->unmap[sop_ci].page_offset);
+        prefetch(page_address(unmap_q->unmap[ci].page) +
+                 unmap_q->unmap[ci].page_offset);
+
+        while (nvecs--) {
+                struct bnad_rx_unmap *unmap;
+                u32 len;
 
-        for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
                 unmap = &unmap_q->unmap[ci];
                 BNA_QE_INDX_INC(ci, rcb->q_depth);
 
@@ -565,8 +574,7 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
                         dma_unmap_addr(&unmap->vector, dma_addr),
                         unmap->vector.len, DMA_FROM_DEVICE);
 
-                len = (vec == nvecs) ?
-                        last_fraglen : unmap->vector.len;
+                len = ntohs(cmpl->length);
                 skb->truesize += unmap->vector.len;
                 totlen += len;
 
@@ -575,6 +583,9 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
 
                 unmap->page = NULL;
                 unmap->vector.len = 0;
+
+                BNA_QE_INDX_INC(pi, ccb->q_depth);
+                cmpl = &cq[pi];
         }
 
         skb->len += totlen;
@@ -704,7 +715,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
         if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
                 bnad_cq_setup_skb(bnad, skb, unmap, len);
         else
-                bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
+                bnad_cq_setup_skb_frags(ccb, skb, nvecs);
 
         rcb->rxq->rx_packets++;
         rcb->rxq->rx_bytes += totlen;