Commit f3950a41 authored by Lorenzo Bianconi's avatar Lorenzo Bianconi Committed by Felix Fietkau

mt76: set txwi_size according to the driver value

Dynamically allocate txwi since new chipsets will use longer txwi
descriptors
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent ce0fd825
...@@ -296,12 +296,14 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, ...@@ -296,12 +296,14 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct mt76_txwi_cache *t; struct mt76_txwi_cache *t;
struct sk_buff *iter; struct sk_buff *iter;
dma_addr_t addr; dma_addr_t addr;
u8 *txwi;
t = mt76_get_txwi(dev); t = mt76_get_txwi(dev);
if (!t) { if (!t) {
ieee80211_free_txskb(dev->hw, skb); ieee80211_free_txskb(dev->hw, skb);
return -ENOMEM; return -ENOMEM;
} }
txwi = mt76_get_txwi_ptr(dev, t);
skb->prev = skb->next = NULL; skb->prev = skb->next = NULL;
if (dev->drv->tx_aligned4_skbs) if (dev->drv->tx_aligned4_skbs)
...@@ -331,11 +333,11 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, ...@@ -331,11 +333,11 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
} }
tx_info.nbuf = n; tx_info.nbuf = n;
dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi), dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE); DMA_TO_DEVICE);
ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, qid, wcid, sta, ret = dev->drv->tx_prepare_skb(dev, txwi, skb, qid, wcid, sta,
&tx_info); &tx_info);
dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi), dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (ret < 0) if (ret < 0)
goto unmap; goto unmap;
......
...@@ -229,12 +229,10 @@ struct mt76_txq { ...@@ -229,12 +229,10 @@ struct mt76_txq {
}; };
struct mt76_txwi_cache { struct mt76_txwi_cache {
u32 txwi[8];
dma_addr_t dma_addr;
struct list_head list; struct list_head list;
dma_addr_t dma_addr;
}; };
struct mt76_rx_tid { struct mt76_rx_tid {
struct rcu_head rcu_head; struct rcu_head rcu_head;
...@@ -617,6 +615,12 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str, ...@@ -617,6 +615,12 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
int mt76_eeprom_init(struct mt76_dev *dev, int len); int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev); void mt76_eeprom_override(struct mt76_dev *dev);
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
return (u8 *)t - dev->drv->txwi_size;
}
/* increment with wrap-around */ /* increment with wrap-around */
static inline int mt76_incr(int val, int size) static inline int mt76_incr(int val, int size)
{ {
......
...@@ -770,6 +770,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, ...@@ -770,6 +770,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
{ {
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_txwi *txwi; struct mt76x02_txwi *txwi;
u8 *txwi_ptr;
if (!e->txwi) { if (!e->txwi) {
dev_kfree_skb_any(e->skb); dev_kfree_skb_any(e->skb);
...@@ -778,7 +779,8 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, ...@@ -778,7 +779,8 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
mt76x02_mac_poll_tx_status(dev, false); mt76x02_mac_poll_tx_status(dev, false);
txwi = (struct mt76x02_txwi *) &e->txwi->txwi; txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
txwi = (struct mt76x02_txwi *)txwi_ptr;
trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid); trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
mt76_tx_complete_skb(mdev, e->skb); mt76_tx_complete_skb(mdev, e->skb);
......
...@@ -175,7 +175,6 @@ int mt76x02_dma_init(struct mt76x02_dev *dev) ...@@ -175,7 +175,6 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
struct mt76_queue *q; struct mt76_queue *q;
void *status_fifo; void *status_fifo;
BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM); BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);
fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status)); fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
......
...@@ -21,15 +21,17 @@ mt76_alloc_txwi(struct mt76_dev *dev) ...@@ -21,15 +21,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
{ {
struct mt76_txwi_cache *t; struct mt76_txwi_cache *t;
dma_addr_t addr; dma_addr_t addr;
u8 *txwi;
int size; int size;
size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1); size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
t = devm_kzalloc(dev->dev, size, GFP_ATOMIC); txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
if (!t) if (!txwi)
return NULL; return NULL;
addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi), addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
DMA_TO_DEVICE); DMA_TO_DEVICE);
t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
t->dma_addr = addr; t->dma_addr = addr;
return t; return t;
...@@ -78,7 +80,7 @@ void mt76_tx_free(struct mt76_dev *dev) ...@@ -78,7 +80,7 @@ void mt76_tx_free(struct mt76_dev *dev)
struct mt76_txwi_cache *t; struct mt76_txwi_cache *t;
while ((t = __mt76_get_txwi(dev)) != NULL) while ((t = __mt76_get_txwi(dev)) != NULL)
dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi), dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE); DMA_TO_DEVICE);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment