Commit f88f113a authored by Vladimir Kondratiev, committed by John W. Linville

wil6210: Introduce struct for sw context

Enable adding more data to the SW context.
For now, add the flag "mapped_as_page" to separate the decision on freeing
the skb from the choice of DMA unmapping (page vs. single).
This allows linking the skb itself to any descriptor of a fragmented skb.
Signed-off-by: Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 03269c65
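
For orientation, the heart of the change can be sketched on its own. This is a simplified illustration derived from the diff below, not the literal driver code; the helper wil_ctx_unmap() is a made-up name used only to show how the new context is meant to be consumed:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* One entry per vring descriptor (vring->ctx[i] in the driver). */
struct wil_ctx {
	struct sk_buff *skb;	/* set only on the descriptor that holds the skb reference */
	u8 mapped_as_page:1;	/* 1: mapped with dma_map_page(), 0: dma_map_single() */
};

/*
 * Hypothetical helper: release one descriptor's resources.
 * The unmap flavour is chosen by mapped_as_page, while freeing the skb
 * depends only on whether this descriptor carries the skb pointer.
 */
static void wil_ctx_unmap(struct device *dev, struct wil_ctx *ctx,
			  dma_addr_t pa, u16 dmalen)
{
	if (ctx->mapped_as_page)
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
	else
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);

	if (ctx->skb)
		dev_kfree_skb_any(ctx->skb);

	memset(ctx, 0, sizeof(*ctx));
}

Before this change, vring->ctx[i] was a bare sk_buff pointer, so a NULL entry had to mean both "nothing to free" and "this fragment was mapped as a page"; the new bitfield removes that coupling.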
@@ -51,7 +51,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
 			if ((i % 64) == 0 && (i != 0))
 				seq_printf(s, "\n");
 			seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
-				   "S" : (vring->ctx[i] ? "H" : "h"));
+				   "S" : (vring->ctx[i].skb ? "H" : "h"));
 		}
 	seq_printf(s, "\n");
 }
@@ -406,7 +406,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
 		volatile struct vring_tx_desc *d =
 				&(vring->va[dbg_txdesc_index].tx);
 		volatile u32 *u = (volatile u32 *)d;
-		struct sk_buff *skb = vring->ctx[dbg_txdesc_index];
+		struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
 
 		seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
 		seq_printf(s, "  MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
...
@@ -70,7 +70,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 	vring->swhead = 0;
 	vring->swtail = 0;
-	vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
+	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
 	if (!vring->ctx) {
 		vring->va = NULL;
 		return -ENOMEM;
@@ -108,39 +108,39 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
 	while (!wil_vring_is_empty(vring)) {
 		dma_addr_t pa;
-		struct sk_buff *skb;
 		u16 dmalen;
+		struct wil_ctx *ctx;
 
 		if (tx) {
 			struct vring_tx_desc dd, *d = &dd;
 			volatile struct vring_tx_desc *_d =
 					&vring->va[vring->swtail].tx;
 
+			ctx = &vring->ctx[vring->swtail];
 			*d = *_d;
 			pa = wil_desc_addr(&d->dma.addr);
 			dmalen = le16_to_cpu(d->dma.length);
-			skb = vring->ctx[vring->swtail];
-			if (skb) {
-				dma_unmap_single(dev, pa, dmalen,
-						 DMA_TO_DEVICE);
-				dev_kfree_skb_any(skb);
-				vring->ctx[vring->swtail] = NULL;
-			} else {
+			if (vring->ctx[vring->swtail].mapped_as_page) {
 				dma_unmap_page(dev, pa, dmalen,
 					       DMA_TO_DEVICE);
+			} else {
+				dma_unmap_single(dev, pa, dmalen,
+						 DMA_TO_DEVICE);
 			}
+			if (ctx->skb)
+				dev_kfree_skb_any(ctx->skb);
 			vring->swtail = wil_vring_next_tail(vring);
 		} else { /* rx */
 			struct vring_rx_desc dd, *d = &dd;
 			volatile struct vring_rx_desc *_d =
 					&vring->va[vring->swhead].rx;
 
+			ctx = &vring->ctx[vring->swhead];
 			*d = *_d;
 			pa = wil_desc_addr(&d->dma.addr);
 			dmalen = le16_to_cpu(d->dma.length);
-			skb = vring->ctx[vring->swhead];
 			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
-			kfree_skb(skb);
+			kfree_skb(ctx->skb);
 			wil_vring_advance_head(vring, 1);
 		}
 	}
@@ -187,7 +187,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
 	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
 	d->dma.length = cpu_to_le16(sz);
 	*_d = *d;
-	vring->ctx[i] = skb;
+	vring->ctx[i].skb = skb;
 
 	return 0;
 }
@@ -352,11 +352,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
 		return NULL;
 	}
 
-	skb = vring->ctx[vring->swhead];
+	skb = vring->ctx[vring->swhead].skb;
 	d = wil_skb_rxdesc(skb);
 	*d = *_d;
 	pa = wil_desc_addr(&d->dma.addr);
-	vring->ctx[vring->swhead] = NULL;
+	vring->ctx[vring->swhead].skb = NULL;
 	wil_vring_advance_head(vring, 1);
 
 	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
@@ -703,7 +703,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 		if (unlikely(dma_mapping_error(dev, pa)))
 			goto dma_error;
 		wil_tx_desc_map(d, pa, len, vring_index);
-		vring->ctx[i] = NULL;
+		vring->ctx[i].mapped_as_page = 1;
 		*_d = *d;
 	}
 	/* for the last seg only */
@@ -724,7 +724,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 	 * to prevent skb release before accounting
 	 * in case of immediate "tx done"
 	 */
-	vring->ctx[i] = skb_get(skb);
+	vring->ctx[i].skb = skb_get(skb);
 
 	return 0;
  dma_error:
@@ -732,6 +732,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 	/* Note: increment @f to operate with positive index */
 	for (f++; f > 0; f--) {
 		u16 dmalen;
+		struct wil_ctx *ctx = &vring->ctx[i];
 
 		i = (swhead + f) % vring->size;
 		_d = &(vring->va[i].tx);
@@ -739,10 +740,15 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 		_d->dma.status = TX_DMA_STATUS_DU;
 		pa = wil_desc_addr(&d->dma.addr);
 		dmalen = le16_to_cpu(d->dma.length);
-		if (vring->ctx[i])
-			dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
-		else
+		if (ctx->mapped_as_page)
 			dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+		else
+			dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+
+		if (ctx->skb)
+			dev_kfree_skb_any(ctx->skb);
+
+		memset(ctx, 0, sizeof(*ctx));
 	}
 
 	return -EINVAL;
@@ -821,8 +827,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
 			&vring->va[vring->swtail].tx;
 		struct vring_tx_desc dd, *d = &dd;
 		dma_addr_t pa;
-		struct sk_buff *skb;
 		u16 dmalen;
+		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
+		struct sk_buff *skb = ctx->skb;
 
 		*d = *_d;
@@ -840,7 +847,11 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
 				  (const void *)d, sizeof(*d), false);
 
 		pa = wil_desc_addr(&d->dma.addr);
-		skb = vring->ctx[vring->swtail];
+		if (ctx->mapped_as_page)
+			dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+		else
+			dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+
 		if (skb) {
 			if (d->dma.error == 0) {
 				ndev->stats.tx_packets++;
@@ -849,12 +860,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
 				ndev->stats.tx_errors++;
 			}
 
-			dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
 			dev_kfree_skb_any(skb);
-			vring->ctx[vring->swtail] = NULL;
-		} else {
-			dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
 		}
+		memset(ctx, 0, sizeof(*ctx));
 		/*
 		 * There is no need to touch HW descriptor:
 		 * - ststus bit TX_DMA_STATUS_DU is set by design,
...
@@ -183,6 +183,14 @@ struct pending_wmi_event {
 	} __packed event;
 };
 
+/**
+ * struct wil_ctx - software context for Vring descriptor
+ */
+struct wil_ctx {
+	struct sk_buff *skb;
+	u8 mapped_as_page:1;
+};
+
 union vring_desc;
 
 struct vring {
@@ -192,7 +200,7 @@ struct vring {
 	u32 swtail;
 	u32 swhead;
 	u32 hwtail; /* write here to inform hw */
-	void **ctx; /* void *ctx[size] - software context */
+	struct wil_ctx *ctx; /* ctx[size] - software context */
 };
 
 enum { /* for wil6210_priv.status */
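
Tying the pieces together, the sketch below shows how the context array is expected to describe one fragmented skb after this change. It is illustrative only: tx_map_skb_sketch() is not a real driver function, descriptor filling is elided, and the head/fragment mapping split (dma_map_single() for the linear part, skb_frag_dma_map() for page fragments) is assumed from the surrounding driver code rather than visible in this diff:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* assumes struct vring and struct wil_ctx as declared in wil6210.h above */
static int tx_map_skb_sketch(struct device *dev, struct vring *vring,
			     u32 i, struct sk_buff *skb)
{
	int f, nr_frags = skb_shinfo(skb)->nr_frags;
	dma_addr_t pa;

	/* linear head: mapped_as_page stays 0, so cleanup uses dma_unmap_single() */
	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	/* ... fill descriptor i with pa / skb_headlen(skb) ... */

	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		i = (i + 1) % vring->size;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa)))
			return -EINVAL; /* the real code unwinds prior mappings here */
		/* ... fill descriptor i with pa / skb_frag_size(frag) ... */
		vring->ctx[i].mapped_as_page = 1;
	}

	/*
	 * Park the skb reference on the last descriptor only; tx-complete
	 * then frees it exactly once and unmaps each fragment according to
	 * its own mapped_as_page flag.
	 */
	vring->ctx[i].skb = skb_get(skb);
	return 0;
}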