Commit cfaae9e6 authored by Lorenzo Bianconi, committed by Felix Fietkau

mt76: add skb pointer to mt76_tx_info

Pass the skb pointer to tx_prepare_skb through the mt76_tx_info data structure.
This is a preliminary patch to properly support the DMA error path for
new chipsets (e.g. MT7615).
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent f3950a41
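
For illustration only: a minimal user-space C sketch of the calling convention this patch introduces. The skb now rides inside mt76_tx_info, so the tx_prepare_skb hook reads it from (and can update it through) that structure instead of taking it as a separate argument, which is what later lets the DMA error path hand a consistent skb pointer back to the caller. The struct definitions and the demo_tx_prepare_skb() helper below are simplified stand-ins, not the driver's real types.

#include <stdio.h>

/* Stand-in for the kernel's struct sk_buff, reduced to what the demo needs. */
struct sk_buff {
        int len;
};

/* Simplified mirror of the patched mt76_tx_info: the skb now lives here. */
struct mt76_tx_info {
        struct sk_buff *skb;    /* member added by this patch */
        int nbuf;
        unsigned int info;
};

/*
 * Hypothetical prepare hook: it pulls the skb out of tx_info rather than
 * receiving it as its own parameter, and can fail without losing track of it.
 */
static int demo_tx_prepare_skb(struct mt76_tx_info *tx_info)
{
        if (!tx_info->skb || tx_info->skb->len == 0)
                return -1;      /* error path: caller still sees tx_info->skb */
        tx_info->info = tx_info->skb->len;
        return 0;
}

int main(void)
{
        struct sk_buff skb = { .len = 128 };
        /* Same initialization pattern the patch adds to the queueing paths. */
        struct mt76_tx_info tx_info = {
                .skb = &skb,
        };

        if (demo_tx_prepare_skb(&tx_info) < 0)
                return 1;

        printf("queued skb of %d bytes, info=%u\n", tx_info.skb->len, tx_info.info);
        return 0;
}
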
@@ -290,7 +290,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 		      struct ieee80211_sta *sta)
 {
 	struct mt76_queue *q = dev->q_tx[qid].q;
-	struct mt76_tx_info tx_info = {};
+	struct mt76_tx_info tx_info = {
+		.skb = skb,
+	};
 	int len, n = 0, ret = -ENOMEM;
 	struct mt76_queue_entry e;
 	struct mt76_txwi_cache *t;
@@ -335,8 +337,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
 				DMA_TO_DEVICE);
-	ret = dev->drv->tx_prepare_skb(dev, txwi, skb, qid, wcid, sta,
-				       &tx_info);
+	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
 	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
 				   DMA_TO_DEVICE);
 	if (ret < 0)
@@ -348,7 +349,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 	}
 	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
-				tx_info.info, skb, t);
+				tx_info.info, tx_info.skb, t);
 unmap:
 	for (n--; n > 0; n--)
@@ -356,7 +357,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 				 tx_info.buf[n].len, DMA_TO_DEVICE);
 free:
-	e.skb = skb;
+	e.skb = tx_info.skb;
 	e.txwi = t;
 	dev->drv->tx_complete_skb(dev, qid, &e);
 	mt76_put_txwi(dev, t);
...
@@ -85,6 +85,7 @@ struct mt76_queue_buf {
 struct mt76_tx_info {
 	struct mt76_queue_buf buf[32];
+	struct sk_buff *skb;
 	int nbuf;
 	u32 info;
 };
@@ -291,8 +292,7 @@ struct mt76_driver_ops {
 	void (*update_survey)(struct mt76_dev *dev);
 	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
-			      struct sk_buff *skb, enum mt76_txq_id qid,
-			      struct mt76_wcid *wcid,
+			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
 			      struct ieee80211_sta *sta,
 			      struct mt76_tx_info *tx_info);
...
@@ -912,13 +912,13 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
 }
 int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
-			  struct sk_buff *skb, enum mt76_txq_id qid,
-			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			  struct ieee80211_sta *sta,
 			  struct mt76_tx_info *tx_info)
 {
 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
 	struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
 	struct ieee80211_key_conf *key = info->control.hw_key;
 	int pid;
@@ -934,7 +934,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 		mt7603_wtbl_set_ps(dev, msta, false);
 	}
-	pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
 	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
 		spin_lock_bh(&dev->mt76.lock);
@@ -944,7 +944,8 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 		spin_unlock_bh(&dev->mt76.lock);
 	}
-	mt7603_mac_write_txwi(dev, txwi_ptr, skb, qid, wcid, sta, pid, key);
+	mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
+			      sta, pid, key);
 	return 0;
 }
...
@@ -221,8 +221,8 @@ void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
 void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort);
 int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
-			  struct sk_buff *skb, enum mt76_txq_id qid,
-			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			  struct ieee80211_sta *sta,
 			  struct mt76_tx_info *tx_info);
 void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
...
@@ -183,8 +183,8 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
 void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
 		struct sk_buff *skb);
 int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
-			   struct sk_buff *skb, enum mt76_txq_id qid,
-			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			   enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			   struct ieee80211_sta *sta,
 			   struct mt76_tx_info *tx_info);
 void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		     const u8 *mac);
...
@@ -147,12 +147,12 @@ bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
 EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
 int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
-			   struct sk_buff *skb, enum mt76_txq_id qid,
-			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			   enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			   struct ieee80211_sta *sta,
 			   struct mt76_tx_info *tx_info)
 {
 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
 	struct mt76x02_txwi *txwi = txwi_ptr;
 	int hdrlen, len, pid, qsel = MT_QSEL_EDCA;
@@ -160,10 +160,10 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 		mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
-	len = skb->len - (hdrlen & 2);
-	mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
-	pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+	len = tx_info->skb->len - (hdrlen & 2);
+	mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
+	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
 	txwi->pktid = pid;
 	if (pid >= MT_PACKET_ID_FIRST)
...
@@ -26,8 +26,8 @@ int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
 int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
 int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
-			    struct sk_buff *skb, enum mt76_txq_id qid,
-			    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			    enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			    struct ieee80211_sta *sta,
 			    struct mt76_tx_info *tx_info);
 void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
 			      struct mt76_queue_entry *e);
...
@@ -72,23 +72,23 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
 }
 int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
-			    struct sk_buff *skb, enum mt76_txq_id qid,
-			    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			    enum mt76_txq_id qid, struct mt76_wcid *wcid,
+			    struct ieee80211_sta *sta,
 			    struct mt76_tx_info *tx_info)
 {
 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
-	int pid, len = skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
+	int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
 	struct mt76x02_txwi *txwi;
 	enum mt76_qsel qsel;
 	u32 flags;
-	mt76_insert_hdr_pad(skb);
+	mt76_insert_hdr_pad(tx_info->skb);
-	txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
-	mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
-	skb_push(skb, sizeof(struct mt76x02_txwi));
+	txwi = (struct mt76x02_txwi *)(tx_info->skb->data - sizeof(*txwi));
+	mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
+	skb_push(tx_info->skb, sizeof(*txwi));
-	pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
 	txwi->pktid = pid;
 	if (pid >= MT_PACKET_ID_FIRST || ep == MT_EP_OUT_HCCA)
@@ -101,7 +101,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
 	if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
 		flags |= MT_TXD_INFO_WIV;
-	return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
+	return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
 }
 EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
...
@@ -734,7 +734,9 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 		   struct ieee80211_sta *sta)
 {
 	struct mt76_queue *q = dev->q_tx[qid].q;
-	struct urb *urb;
+	struct mt76_tx_info tx_info = {
+		.skb = skb,
+	};
 	u16 idx = q->tail;
 	int err;
@@ -742,20 +744,20 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 		return -ENOSPC;
 	skb->prev = skb->next = NULL;
-	err = dev->drv->tx_prepare_skb(dev, NULL, skb, qid, wcid, sta, NULL);
+	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
 	if (err < 0)
 		return err;
-	urb = q->entry[idx].urb;
-	err = mt76u_tx_setup_buffers(dev, skb, urb);
+	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
 	if (err < 0)
 		return err;
 	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
-			    urb, mt76u_complete_tx, &q->entry[idx]);
+			    q->entry[idx].urb, mt76u_complete_tx,
+			    &q->entry[idx]);
 	q->tail = (q->tail + 1) % q->ndesc;
-	q->entry[idx].skb = skb;
+	q->entry[idx].skb = tx_info.skb;
 	q->queued++;
 	return idx;
...