Commit 0d1bf7a5 authored by Jakub Kicinski

Merge tag 'wireless-drivers-2021-02-26' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers

Kalle Valo says:

====================
wireless-drivers fixes for v5.12

First set of fixes for v5.12. One iwlwifi kernel crash fix and smaller
fixes to multiple drivers.

ath9k
 * fix Spatial Multiplexing Power Save (SMPS) handling to improve throughput

mt76
 * error handling fixes
 * memory leak fixes

iwlwifi
 * don't crash during debug collection on DVM devices

MAINTAINERS
 * email address update

ath11k
 * fix GCC warning about DMA address debug messages
 * fix regression which broke QCA6390 AP mode

* tag 'wireless-drivers-2021-02-26' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers:
  mt76: mt7915: fix unused 'mode' variable
  mt76: dma: do not report truncated frames to mac80211
  mt76: mt7921: remove incorrect error handling
  iwlwifi: pcie: fix iwl_so_trans_cfg link error when CONFIG_IWLMVM is disabled
  ath11k: fix AP mode for QCA6390
  ath11k: qmi: use %pad to format dma_addr_t
  MAINTAINERS: update for mwifiex driver maintainers
  iwlwifi: avoid crash on unsupported debug collection
  mt76: mt7915: only modify tx buffer list after allocating tx token id
  mt76: fix tx skb error handling in mt76_dma_tx_queue_skb
  ath9k: fix transmitting to stations in dynamic SMPS mode
====================

Link: https://lore.kernel.org/r/20210226164411.CDD03C433CA@smtp.codeaurora.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 9e8e714f c490492f
@@ -10679,7 +10679,8 @@ F:	drivers/net/ethernet/marvell/mvpp2/
 MARVELL MWIFIEX WIRELESS DRIVER
 M:	Amitkumar Karwar <amitkarwar@gmail.com>
-M:	Ganapathi Bhat <ganapathi.bhat@nxp.com>
+M:	Ganapathi Bhat <ganapathi017@gmail.com>
+M:	Sharvari Harisangam <sharvari.harisangam@nxp.com>
 M:	Xinming Hu <huxinming820@gmail.com>
 L:	linux-wireless@vger.kernel.org
 S:	Maintained
@@ -5450,8 +5450,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
 	}
 
 	if (ab->hw_params.vdev_start_delay &&
-	    (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
-	     arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
+	    arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+	    arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
 		param.vdev_id = arvif->vdev_id;
 		param.peer_type = WMI_PEER_TYPE_DEFAULT;
 		param.peer_addr = ar->mac_addr;
@@ -1687,8 +1687,8 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
 		req->mem_seg[i].size = ab->qmi.target_mem[i].size;
 		req->mem_seg[i].type = ab->qmi.target_mem[i].type;
 		ath11k_dbg(ab, ATH11K_DBG_QMI,
-			   "qmi req mem_seg[%d] 0x%llx %u %u\n", i,
-			   ab->qmi.target_mem[i].paddr,
+			   "qmi req mem_seg[%d] %pad %u %u\n", i,
+			   &ab->qmi.target_mem[i].paddr,
 			   ab->qmi.target_mem[i].size,
 			   ab->qmi.target_mem[i].type);
 	}
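The hunk above is the ath11k "%pad" fix from the summary: printing a dma_addr_t with 0x%llx is a format/type mismatch on configurations where the type is 32 bits wide, while the kernel's %pad specifier takes a pointer to the value and prints it at whatever width the build uses. A rough userspace sketch of that idea follows; dma_addr_t is modelled as a local typedef and print_dma_addr() is an illustrative stand-in for what printk's %pad does, not kernel code.

/* Userspace illustration (not kernel code) of why %pad takes a pointer:
 * dma_addr_t's width is configuration dependent, so printing through a
 * pointer lets one helper handle both 32-bit and 64-bit layouts. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* would be uint32_t on 32-bit DMA configs */

static void print_dma_addr(const dma_addr_t *addr)
{
	/* Mirrors the %pad idea: dereference and print at the right width. */
	if (sizeof(*addr) == sizeof(uint64_t))
		printf("%016" PRIx64 "\n", (uint64_t)*addr);
	else
		printf("%08" PRIx32 "\n", (uint32_t)*addr);
}

int main(void)
{
	dma_addr_t paddr = 0x12345678abcdULL;

	print_dma_addr(&paddr);	/* analogous to printk("%pad", &paddr) */
	return 0;
}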
@@ -177,7 +177,8 @@ struct ath_frame_info {
 	s8 txq;
 	u8 keyix;
 	u8 rtscts_rate;
-	u8 retries : 7;
+	u8 retries : 6;
+	u8 dyn_smps : 1;
 	u8 baw_tracked : 1;
 	u8 tx_power;
 	enum ath9k_key_type keytype:2;
@@ -1271,6 +1271,11 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
 						     is_40, is_sgi, is_sp);
 			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
 				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
+			if (rix >= 8 && fi->dyn_smps) {
+				info->rates[i].RateFlags |=
+					ATH9K_RATESERIES_RTS_CTS;
+				info->flags |= ATH9K_TXDESC_CTSENA;
+			}
 
 			info->txpower[i] = ath_get_rate_txpower(sc, bf, rix,
 								is_40, false);
@@ -2114,6 +2119,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
 		fi->keyix = an->ps_key;
 	else
 		fi->keyix = ATH9K_TXKEYIX_INVALID;
+	fi->dyn_smps = sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC;
 	fi->keytype = keytype;
 	fi->framelen = framelen;
 	fi->tx_power = txpower;
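The two ath9k hunks implement the SMPS fix from the summary: a per-frame dyn_smps flag is recorded (taking one bit from the retries field), and for MCS rates (rix >= 8) RTS/CTS protection is forced so a peer in dynamic SMPS mode enables its extra receive chains before the MIMO frame arrives. Below is a standalone sketch of the bitfield repack; the struct is a simplified stand-in, not the real ath_frame_info.

/* Standalone illustration of the ath_frame_info bitfield repack above:
 * the retries counter shrinks from 7 to 6 bits so a dyn_smps flag fits
 * in the same byte. */
#include <stdint.h>
#include <stdio.h>

struct frame_info_bits {
	uint8_t retries     : 6;	/* 0..63 still covers the software retry limit */
	uint8_t dyn_smps    : 1;	/* peer is in dynamic SMPS mode */
	uint8_t baw_tracked : 1;	/* frame tracked in the block-ack window */
};

int main(void)
{
	struct frame_info_bits fi = { .retries = 10, .dyn_smps = 1 };

	/* All three fields still share a single byte of storage. */
	printf("sizeof=%zu retries=%u dyn_smps=%u baw_tracked=%u\n",
	       sizeof(fi), fi.retries, fi.dyn_smps, fi.baw_tracked);
	return 0;
}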
@@ -205,6 +205,8 @@ static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
 					  enum iwl_fw_ini_time_point tp_id,
 					  union iwl_dbg_tlv_tp_data *tp_data)
 {
+	if (!op_mode || !op_mode->ops || !op_mode->ops->time_point)
+		return;
 	op_mode->ops->time_point(op_mode, tp_id, tp_data);
 }
 
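This is the "avoid crash on unsupported debug collection" fix: older DVM op modes do not provide a time_point handler, so the inline helper now bails out instead of jumping through a NULL callback. A minimal userspace sketch of the guard pattern follows; the structure and names are simplified stand-ins for the iwlwifi types.

/* Minimal userspace sketch of the guard added above: check every link
 * in the chain before invoking an optional callback. */
#include <stddef.h>
#include <stdio.h>

struct ops {
	void (*time_point)(int tp_id);
};

struct op_mode {
	const struct ops *ops;
};

static void op_mode_time_point(struct op_mode *op_mode, int tp_id)
{
	/* DVM-style op modes leave .time_point NULL; bail out instead of
	 * dereferencing a NULL function pointer. */
	if (!op_mode || !op_mode->ops || !op_mode->ops->time_point)
		return;
	op_mode->ops->time_point(tp_id);
}

int main(void)
{
	const struct ops dvm_ops = { .time_point = NULL };
	struct op_mode dvm = { .ops = &dvm_ops };

	op_mode_time_point(&dvm, 1);	/* ignored instead of crashing */
	op_mode_time_point(NULL, 1);
	printf("no crash\n");
	return 0;
}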
@@ -1106,6 +1106,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 
+#if IS_ENABLED(CONFIG_IWLMVM)
+
 	/*
 	 * Workaround for problematic SnJ device: sometimes when
 	 * certain RF modules are connected to SnJ, the device ID
@@ -1116,7 +1118,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_SNJ)
 		iwl_trans->trans_cfg = &iwl_so_trans_cfg;
 
-#if IS_ENABLED(CONFIG_IWLMVM)
 	/*
 	 * special-case 7265D, it has the same PCI IDs.
 	 *
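The iwl_so_trans_cfg symbol is only built when CONFIG_IWLMVM is enabled, so the SnJ workaround that references it is moved inside the existing IS_ENABLED(CONFIG_IWLMVM) region; otherwise the reference survives to link time and fails. A toy userspace illustration of the same pattern follows; CONFIG_IWLMVM here is just a -D compile flag for the demo, not the real Kconfig machinery.

/* Toy illustration of the link-error fix above: a reference to a symbol
 * that only exists under a build option must live inside the same guard. */
#include <stdio.h>

#ifdef CONFIG_IWLMVM
static const char iwl_so_trans_cfg_name[] = "so trans cfg";
#endif

int main(void)
{
#ifdef CONFIG_IWLMVM
	/* Safe: the symbol is compiled in whenever this reference is. */
	printf("SnJ workaround selects: %s\n", iwl_so_trans_cfg_name);
#else
	printf("MVM support disabled, workaround not built\n");
#endif
	return 0;
}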
@@ -345,7 +345,6 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 	};
 	struct ieee80211_hw *hw;
 	int len, n = 0, ret = -ENOMEM;
-	struct mt76_queue_entry e;
 	struct mt76_txwi_cache *t;
 	struct sk_buff *iter;
 	dma_addr_t addr;
@@ -387,6 +386,11 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 	}
 	tx_info.nbuf = n;
 
+	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
+		ret = -ENOMEM;
+		goto unmap;
+	}
+
 	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
 				DMA_TO_DEVICE);
 	ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
@@ -395,11 +399,6 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 	if (ret < 0)
 		goto unmap;
 
-	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
-		ret = -ENOMEM;
-		goto unmap;
-	}
-
 	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);
@@ -419,9 +418,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 	}
 #endif
 
-	e.skb = tx_info.skb;
-	e.txwi = t;
-	dev->drv->tx_complete_skb(dev, &e);
+	dev_kfree_skb(tx_info.skb);
 	mt76_put_txwi(dev, t);
 	return ret;
 }
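Two things change in mt76_dma_tx_queue_skb(): the queue-space check now runs before tx_prepare_skb(), so a full ring is rejected before any per-frame state has been committed, and the error path frees the skb directly instead of running tx_complete_skb() on a half-initialized entry that was never queued. A rough userspace sketch of that ordering follows; all names and numbers are illustrative, not the driver's.

/* Userspace sketch of the reordering above: reject a frame while no
 * per-frame state has been committed yet, and on failure free the
 * buffer directly rather than running a completion path for an entry
 * that was never queued. */
#include <stdio.h>
#include <stdlib.h>

struct queue {
	int queued;
	int ndesc;
};

static int tx_queue_buf(struct queue *q, char *buf, int nbuf)
{
	/* Check for ring space before anything else, mirroring the moved
	 * q->queued test in mt76_dma_tx_queue_skb(). */
	if (q->queued + (nbuf + 1) / 2 >= q->ndesc - 1) {
		free(buf);		/* direct free, no completion callback */
		return -1;
	}

	q->queued += (nbuf + 1) / 2;
	return 0;
}

int main(void)
{
	struct queue q = { .queued = 126, .ndesc = 128 };
	char *buf = malloc(64);

	if (!buf)
		return 1;
	if (tx_queue_buf(&q, buf, 2))
		printf("queue full, frame dropped cleanly\n");
	else
		free(buf);
	return 0;
}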
@@ -515,13 +512,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 {
 	struct sk_buff *skb = q->rx_head;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	int nr_frags = shinfo->nr_frags;
 
-	if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
+	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
 		struct page *page = virt_to_head_page(data);
 		int offset = data - page_address(page) + q->buf_offset;
 
-		skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
-				q->buf_size);
+		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
 	} else {
 		skb_free_frag(data);
 	}
@@ -530,7 +527,10 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 		return;
 
 	q->rx_head = NULL;
-	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+	if (nr_frags < ARRAY_SIZE(shinfo->frags))
+		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+	else
+		dev_kfree_skb(skb);
 }
 
 static int
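The mt76_add_fragment() change matches the "do not report truncated frames" entry above: the fragment count is sampled before skb_add_rx_frag() runs, so when the last fragment arrives the driver can tell whether a fragment was dropped for lack of frag slots and free the truncated frame instead of handing it to mac80211. Below is a small userspace model of that bookkeeping; the types are stand-ins and MAX_FRAGS plays the role of ARRAY_SIZE(shinfo->frags).

/* Small userspace model of the truncation bookkeeping above: remember
 * how many fragments were attached before trying to add a new one, so
 * the final-fragment path can tell a complete frame from a truncated one. */
#include <stdio.h>

#define MAX_FRAGS 4

struct rx_frame {
	int nr_frags;
};

static void rx_add_fragment(struct rx_frame *frame, int more)
{
	int nr_frags = frame->nr_frags;	/* snapshot before mutation */

	if (nr_frags < MAX_FRAGS)
		frame->nr_frags++;	/* fragment attached */
	/* else: fragment dropped, the frame is now truncated */

	if (more)
		return;

	if (nr_frags < MAX_FRAGS)
		printf("deliver frame with %d fragments\n", frame->nr_frags);
	else
		printf("drop truncated frame\n");
}

int main(void)
{
	struct rx_frame frame = { .nr_frags = 0 };
	int i;

	for (i = 0; i < 6; i++)
		rx_add_fragment(&frame, i < 5);	/* last call has more == 0 */
	return 0;
}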
@@ -967,11 +967,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 	}
 	txp->nbuf = nbuf;
 
-	/* pass partial skb header to fw */
-	tx_info->buf[1].len = MT_CT_PARSE_LEN;
-	tx_info->buf[1].skip_unmap = true;
-	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
-
 	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);
 
 	if (!key)
@@ -1009,6 +1004,11 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 	txp->rept_wds_wcid = cpu_to_le16(0x3ff);
 	tx_info->skb = DMA_DUMMY_DATA;
 
+	/* pass partial skb header to fw */
+	tx_info->buf[1].len = MT_CT_PARSE_LEN;
+	tx_info->buf[1].skip_unmap = true;
+	tx_info->nbuf = MT_CT_DMA_BUF_NUM;
+
 	return 0;
 }
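The two mt7915 hunks pair with the "only modify tx buffer list after allocating tx token id" entry: the writes to tx_info->buf[1] and tx_info->nbuf are deferred until after every step that can fail, so an error return leaves the caller's unmap path looking at an unmodified buffer list. A compact userspace sketch of that discipline follows; names and values are placeholders (72 stands in for the driver's MT_CT_PARSE_LEN, 2 for MT_CT_DMA_BUF_NUM).

/* Compact userspace sketch of the reordering above: defer updates to a
 * caller-visible buffer descriptor until every fallible step has
 * succeeded, so an error return leaves cleanup looking at unmodified
 * state. */
#include <stdio.h>

struct tx_buf {
	int len;
	int skip_unmap;
};

struct tx_info {
	struct tx_buf buf[2];
	int nbuf;
};

static int alloc_token(int fail)
{
	return fail ? -1 : 42;	/* stand-in for the tx token allocation */
}

static int prepare_skb(struct tx_info *tx_info, int fail_token)
{
	int token = alloc_token(fail_token);

	if (token < 0)
		return -1;		/* tx_info->buf[] left untouched */

	/* Only now rewrite the buffer list handed back to the DMA layer. */
	tx_info->buf[1].len = 72;
	tx_info->buf[1].skip_unmap = 1;
	tx_info->nbuf = 2;
	return token;
}

int main(void)
{
	struct tx_info ti = { .nbuf = 5 };

	if (prepare_skb(&ti, 1) < 0)
		printf("token allocation failed, nbuf still %d\n", ti.nbuf);
	return 0;
}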
@@ -543,7 +543,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
 		tx_cont->bw = CMD_CBW_20MHZ;
 		break;
 	default:
-		break;
+		return -EINVAL;
 	}
 
 	if (!en) {
@@ -591,7 +591,7 @@ mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
 		mode = MT_PHY_TYPE_HE_MU;
 		break;
 	default:
-		break;
+		return -EINVAL;
 	}
 
 	rateval = mode << 6 | rate_idx;
@@ -405,10 +405,8 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
 	if (wlan_idx >= MT76_N_WCIDS)
 		return;
 	wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
-	if (!wcid) {
-		stats->tx_rate = rate;
+	if (!wcid)
 		return;
-	}
 
 	msta = container_of(wcid, struct mt7921_sta, wcid);
 	stats = &msta->stats;