Commit 16254fc5 authored by Felix Fietkau

mt76: sdio: fix use of q->head and q->tail

Their use is reversed compared to DMA. The order used for DMA makes more sense,
so let's use that.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 95f61e17
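
With this change the SDIO queues follow the same ring convention as the mt76 DMA code: the producer fills the slot at q->head and then advances it, while the consumer drains the oldest entry at q->tail, and q->queued tracks how many slots are occupied. Below is a minimal sketch of that convention in plain C; the names (struct ring, ring_enqueue, ring_dequeue, NDESC) are hypothetical and the sketch omits the locking the driver performs around these updates.

/*
 * Minimal sketch of the head/tail ring convention adopted here
 * (matching the DMA code): producer writes at head, consumer
 * reads at tail, "queued" tracks occupancy. Illustration only,
 * not the mt76 API.
 */
#include <stdbool.h>

#define NDESC 8

struct ring {
	void *entry[NDESC];
	unsigned int head;	/* next slot the producer fills */
	unsigned int tail;	/* oldest slot the consumer drains */
	unsigned int queued;	/* number of occupied slots */
};

static bool ring_enqueue(struct ring *q, void *buf)
{
	if (q->queued == NDESC)		/* ring full, like the -ENOSPC check */
		return false;

	q->entry[q->head] = buf;
	q->head = (q->head + 1) % NDESC;
	q->queued++;

	return true;
}

static void *ring_dequeue(struct ring *q)
{
	void *buf;

	if (!q->queued)			/* nothing pending to complete */
		return NULL;

	buf = q->entry[q->tail];
	q->tail = (q->tail + 1) % NDESC;
	q->queued--;

	return buf;
}
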
@@ -97,7 +97,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
 	}
 	for (i = 0; i < intr->rx.num[qid]; i++) {
-		int index = (q->tail + i) % q->ndesc;
+		int index = (q->head + i) % q->ndesc;
 		struct mt76_queue_entry *e = &q->entry[index];
 		len = intr->rx.len[qid][i];
@@ -112,7 +112,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
 	__free_pages(page, order);
 	spin_lock_bh(&q->lock);
-	q->tail = (q->tail + i) % q->ndesc;
+	q->head = (q->head + i) % q->ndesc;
 	q->queued += i;
 	spin_unlock_bh(&q->lock);
@@ -166,7 +166,7 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
 	struct mt76_sdio *sdio = &dev->sdio;
 	int nframes = 0;
-	while (q->first != q->tail) {
+	while (q->first != q->head) {
 		struct mt76_queue_entry *e = &q->entry[q->first];
 		int err, len = e->skb->len;
@@ -98,8 +98,8 @@ mt76s_get_next_rx_entry(struct mt76_queue *q)
 	spin_lock_bh(&q->lock);
 	if (q->queued > 0) {
-		e = &q->entry[q->head];
-		q->head = (q->head + 1) % q->ndesc;
+		e = &q->entry[q->tail];
+		q->tail = (q->tail + 1) % q->ndesc;
 		q->queued--;
 	}
 	spin_unlock_bh(&q->lock);
@@ -142,17 +142,17 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
 	bool wake;
 	while (q->queued > n_dequeued) {
-		if (!q->entry[q->head].done)
+		if (!q->entry[q->tail].done)
 			break;
-		if (q->entry[q->head].schedule) {
-			q->entry[q->head].schedule = false;
+		if (q->entry[q->tail].schedule) {
+			q->entry[q->tail].schedule = false;
 			n_sw_dequeued++;
 		}
-		entry = q->entry[q->head];
-		q->entry[q->head].done = false;
-		q->head = (q->head + 1) % q->ndesc;
+		entry = q->entry[q->tail];
+		q->entry[q->tail].done = false;
+		q->tail = (q->tail + 1) % q->ndesc;
 		n_dequeued++;
 		if (qid == MT_TXQ_MCU)
@@ -222,7 +222,7 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 		.skb = skb,
 	};
 	int err, len = skb->len;
-	u16 idx = q->tail;
+	u16 idx = q->head;
 	if (q->queued == q->ndesc)
 		return -ENOSPC;
@@ -232,9 +232,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 	if (err < 0)
 		return err;
-	q->entry[q->tail].skb = tx_info.skb;
-	q->entry[q->tail].buf_sz = len;
-	q->tail = (q->tail + 1) % q->ndesc;
+	q->entry[q->head].skb = tx_info.skb;
+	q->entry[q->head].buf_sz = len;
+	q->head = (q->head + 1) % q->ndesc;
 	q->queued++;
 	return idx;
@@ -256,9 +256,9 @@ mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
 	spin_lock_bh(&q->lock);
-	q->entry[q->tail].buf_sz = len;
-	q->entry[q->tail].skb = skb;
-	q->tail = (q->tail + 1) % q->ndesc;
+	q->entry[q->head].buf_sz = len;
+	q->entry[q->head].skb = skb;
+	q->head = (q->head + 1) % q->ndesc;
 	q->queued++;
 	spin_unlock_bh(&q->lock);