Commit 099ac7ce authored by Michal Kazior, committed by Kalle Valo

ath10k: change ce ring cleanup logic

Make ath10k_pci_init_pipes() effectively only
alter shared target-host data.

The per_transfer_context is a host-only thing.
It is necessary to preserve its contents for a
more robust ring cleanup.

This is required for future warm reset fixes.
Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
parent 4eb2e164
...@@ -835,9 +835,6 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, ...@@ -835,9 +835,6 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->src_nentries); nentries = roundup_pow_of_two(attr->src_nentries);
memset(src_ring->per_transfer_context, 0,
nentries * sizeof(*src_ring->per_transfer_context));
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask; src_ring->sw_index &= src_ring->nentries_mask;
src_ring->hw_index = src_ring->sw_index; src_ring->hw_index = src_ring->sw_index;
...@@ -872,9 +869,6 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, ...@@ -872,9 +869,6 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->dest_nentries); nentries = roundup_pow_of_two(attr->dest_nentries);
memset(dest_ring->per_transfer_context, 0,
nentries * sizeof(*dest_ring->per_transfer_context));
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask; dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index = dest_ring->write_index =
......
...@@ -1196,64 +1196,74 @@ static int ath10k_pci_hif_start(struct ath10k *ar) ...@@ -1196,64 +1196,74 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
return 0; return 0;
} }
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{ {
struct ath10k *ar; struct ath10k *ar;
struct ath10k_pci *ar_pci; struct ath10k_ce_pipe *ce_pipe;
struct ath10k_ce_pipe *ce_hdl; struct ath10k_ce_ring *ce_ring;
u32 buf_sz; struct sk_buff *skb;
struct sk_buff *netbuf; int i;
u32 ce_data;
buf_sz = pipe_info->buf_sz; ar = pci_pipe->hif_ce_state;
ce_pipe = pci_pipe->ce_hdl;
ce_ring = ce_pipe->dest_ring;
/* Unused Copy Engine */ if (!ce_ring)
if (buf_sz == 0)
return; return;
ar = pipe_info->hif_ce_state; if (!pci_pipe->buf_sz)
ar_pci = ath10k_pci_priv(ar); return;
ce_hdl = pipe_info->ce_hdl;
while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf, for (i = 0; i < ce_ring->nentries; i++) {
&ce_data) == 0) { skb = ce_ring->per_transfer_context[i];
dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr, if (!skb)
netbuf->len + skb_tailroom(netbuf), continue;
ce_ring->per_transfer_context[i] = NULL;
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
dev_kfree_skb_any(netbuf); dev_kfree_skb_any(skb);
} }
} }
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{ {
struct ath10k *ar; struct ath10k *ar;
struct ath10k_pci *ar_pci; struct ath10k_pci *ar_pci;
struct ath10k_ce_pipe *ce_hdl; struct ath10k_ce_pipe *ce_pipe;
struct sk_buff *netbuf; struct ath10k_ce_ring *ce_ring;
u32 ce_data; struct ce_desc *ce_desc;
unsigned int nbytes; struct sk_buff *skb;
unsigned int id; unsigned int id;
u32 buf_sz; int i;
buf_sz = pipe_info->buf_sz; ar = pci_pipe->hif_ce_state;
ar_pci = ath10k_pci_priv(ar);
ce_pipe = pci_pipe->ce_hdl;
ce_ring = ce_pipe->src_ring;
/* Unused Copy Engine */ if (!ce_ring)
if (buf_sz == 0)
return; return;
ar = pipe_info->hif_ce_state; if (!pci_pipe->buf_sz)
ar_pci = ath10k_pci_priv(ar); return;
ce_hdl = pipe_info->ce_hdl;
while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, ce_desc = ce_ring->shadow_base;
&ce_data, &nbytes, &id) == 0) { if (WARN_ON(!ce_desc))
/* no need to call tx completion for NULL pointers */ return;
if (!netbuf)
for (i = 0; i < ce_ring->nentries; i++) {
skb = ce_ring->per_transfer_context[i];
if (!skb)
continue; continue;
ar_pci->msg_callbacks_current.tx_completion(ar, ce_ring->per_transfer_context[i] = NULL;
netbuf, id = MS(__le16_to_cpu(ce_desc[i].flags),
id); CE_DESC_FLAGS_META_DATA);
ar_pci->msg_callbacks_current.tx_completion(ar, skb, id);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment