Commit 3fd172d3 authored by Arend van Spriel, committed by John W. Linville

brcm80211: smac: use sk_buff list for handling frames in receive path

In the receive path, frames are obtained from the DMA engine as
multiple sk_buffs linked through the skb next pointer. This has been
changed: the code now uses sk_buff lists and the skb_queue functions
instead.
Reported-by: Johannes Berg <johannes@sipsolutions.net>
Reviewed-by: Pieter-Paul Giesberts <pieterpg@broadcom.com>
Reviewed-by: Alwin Beukers <alwin@broadcom.com>
Signed-off-by: Arend van Spriel <arend@broadcom.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 81d2e2d1
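
The change replaces hand-rolled skb next/prev chains with the kernel's
struct sk_buff_head queue API. As a reading aid for the diff below, here
is a minimal sketch of that pattern; it is not part of the commit, and
the function names rx_collect_example() and rx_process_example() are
illustrative only:

#include <linux/skbuff.h>

/* producer side: gather buffers on a local list, then hand the whole
 * chain to the caller in one O(1) splice (the pattern dma_rx() adopts) */
static void rx_collect_example(struct sk_buff_head *out)
{
        struct sk_buff_head frames;
        struct sk_buff *skb;

        skb_queue_head_init(&frames);

        skb = alloc_skb(128, GFP_ATOMIC);
        if (skb)
                skb_queue_tail(&frames, skb);

        skb_queue_splice_tail(&frames, out);
}

/* consumer side: the _safe walker allows unlinking the current entry
 * while iterating (the pattern brcms_b_recv() adopts) */
static void rx_process_example(struct sk_buff_head *frames)
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(frames, skb, tmp) {
                skb_unlink(skb, frames);
                dev_kfree_skb(skb);
        }
}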
@@ -14,7 +14,6 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 #include <linux/slab.h>
-#include <linux/skbuff.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
@@ -901,7 +900,7 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
 /*
  * !! rx entry routine
- * returns a pointer to the next frame received, or NULL if there are no more
+ * returns the number of packets in the next frame, or 0 if there are no more
  * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is
  * supported with pkts chain
  * otherwise, it's treated as giant pkt and will be tossed.
@@ -909,38 +908,40 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
  * buffer data. After it reaches the max size of buffer, the data continues
  * in next DMA descriptor buffer WITHOUT DMA header
  */
-struct sk_buff *dma_rx(struct dma_pub *pub)
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
 {
        struct dma_info *di = (struct dma_info *)pub;
-       struct sk_buff *p, *head, *tail;
+       struct sk_buff_head dma_frames;
+       struct sk_buff *p, *next;
        uint len;
        uint pkt_len;
        int resid = 0;
+       int pktcnt = 1;
 
+       skb_queue_head_init(&dma_frames);
 next_frame:
-       head = _dma_getnextrxp(di, false);
-       if (head == NULL)
-               return NULL;
+       p = _dma_getnextrxp(di, false);
+       if (p == NULL)
+               return 0;
 
-       len = le16_to_cpu(*(__le16 *) (head->data));
+       len = le16_to_cpu(*(__le16 *) (p->data));
        DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
-       dma_spin_for_len(len, head);
+       dma_spin_for_len(len, p);
 
        /* set actual length */
        pkt_len = min((di->rxoffset + len), di->rxbufsize);
-       __skb_trim(head, pkt_len);
+       __skb_trim(p, pkt_len);
+       skb_queue_tail(&dma_frames, p);
        resid = len - (di->rxbufsize - di->rxoffset);
 
        /* check for single or multi-buffer rx */
        if (resid > 0) {
-               tail = head;
                while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
-                       tail->next = p;
                        pkt_len = min_t(uint, resid, di->rxbufsize);
                        __skb_trim(p, pkt_len);
-
-                       tail = p;
+                       skb_queue_tail(&dma_frames, p);
                        resid -= di->rxbufsize;
+                       pktcnt++;
                }
 
 #ifdef BCMDBG
@@ -959,13 +960,18 @@ struct sk_buff *dma_rx(struct dma_pub *pub)
                if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
                        DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
                                   di->name, len));
-                       brcmu_pkt_buf_free_skb(head);
+                       skb_queue_walk_safe(&dma_frames, p, next) {
+                               skb_unlink(p, &dma_frames);
+                               brcmu_pkt_buf_free_skb(p);
+                       }
                        di->dma.rxgiants++;
+                       pktcnt = 1;
                        goto next_frame;
                }
        }
 
-       return head;
+       skb_queue_splice_tail(&dma_frames, skb_list);
+       return pktcnt;
 }
 
 static bool dma64_rxidle(struct dma_info *di)
...
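
For the multi-buffer branch in dma_rx() above, the resid/pktcnt
bookkeeping can be checked with a small standalone sketch; the sizes
below are hypothetical, not values taken from the driver:

/* mirrors the arithmetic in dma_rx(): the first buffer loses rxoffset
 * bytes to the DMA header, every later buffer carries pure frame data */
static int rx_buffer_count(int len, int rxbufsize, int rxoffset)
{
        int resid = len - (rxbufsize - rxoffset);
        int pktcnt = 1;

        while (resid > 0) {
                resid -= rxbufsize;
                pktcnt++;
        }
        return pktcnt;
}

/* e.g. len = 5000, rxbufsize = 2048, rxoffset = 40: the first buffer
 * carries 2008 data bytes, the second 2048, the third 944, so the
 * function returns pktcnt = 3 */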
@@ -18,6 +18,7 @@
 #define _BRCM_DMA_H_
 
 #include <linux/delay.h>
+#include <linux/skbuff.h>
 #include "types.h"             /* forward structure declarations */
 
 /* map/unmap direction */

@@ -80,7 +81,7 @@ extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
                            uint nrxpost, uint rxoffset, uint *msg_level);
 
 void dma_rxinit(struct dma_pub *pub);
-struct sk_buff *dma_rx(struct dma_pub *pub);
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
 bool dma_rxfill(struct dma_pub *pub);
 bool dma_rxreset(struct dma_pub *pub);
 bool dma_txreset(struct dma_pub *pub);
...
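
With the new prototype the caller owns the list and dma_rx() only
appends to it. A hedged usage sketch; my_dma and process_frame() are
hypothetical placeholders, not symbols from this patch:

        struct sk_buff_head frames;
        struct sk_buff *skb;

        skb_queue_head_init(&frames);

        /* returns the number of buffers queued, 0 when the ring is empty */
        while (dma_rx(my_dma, &frames) > 0) {
                while ((skb = skb_dequeue(&frames)) != NULL)
                        process_frame(skb);
        }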
@@ -8115,21 +8115,17 @@ static bool
 brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
 {
        struct sk_buff *p;
-       struct sk_buff *head = NULL;
-       struct sk_buff *tail = NULL;
+       struct sk_buff *next = NULL;
+       struct sk_buff_head recv_frames;
        uint n = 0;
        uint bound_limit = bound ? RXBND : -1;
 
        BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
 
-       /* gather received frames */
-       while ((p = dma_rx(wlc_hw->di[fifo]))) {
-               if (!tail)
-                       head = tail = p;
-               else {
-                       tail->prev = p;
-                       tail = p;
-               }
+       skb_queue_head_init(&recv_frames);
+
+       /* gather received frames */
+       while (dma_rx(wlc_hw->di[fifo], &recv_frames)) {
 
                /* !give others some time to run! */
                if (++n >= bound_limit)

@@ -8140,12 +8136,11 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
        dma_rxfill(wlc_hw->di[fifo]);
 
        /* process each frame */
-       while ((p = head) != NULL) {
+       skb_queue_walk_safe(&recv_frames, p, next) {
                struct d11rxhdr_le *rxh_le;
                struct d11rxhdr *rxh;
-               head = head->prev;
-               p->prev = NULL;
 
+               skb_unlink(p, &recv_frames);
                rxh_le = (struct d11rxhdr_le *)p->data;
                rxh = (struct d11rxhdr *)p->data;
...
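
A note on the iteration pattern in brcms_b_recv(): each frame is
unlinked from recv_frames inside the loop body, so the _safe walker is
required; it latches the next element before the body runs. In kernels
of this era the macro expands roughly to:

#define skb_queue_walk_safe(queue, skb, tmp)                            \
        for (skb = (queue)->next, tmp = skb->next;                      \
             skb != (struct sk_buff *)(queue);                          \
             skb = tmp, tmp = skb->next)

skb_unlink() clears the unlinked entry's next pointer, so a plain
skb_queue_walk() would crash on the step that follows it.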