Commit 5f606b3e authored by Kalle Valo

Merge tag 'mt76-for-kvalo-2022-09-15' of https://github.com/nbd168/wireless

mt76 patches for 6.1

- fixes
- suspend/resume improvements
- tx status reporting improvements
parents 2e405cff cb74c8f8
......@@ -252,6 +252,30 @@ struct mt76_queue_ops {
void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};
enum mt76_phy_type {
MT_PHY_TYPE_CCK,
MT_PHY_TYPE_OFDM,
MT_PHY_TYPE_HT,
MT_PHY_TYPE_HT_GF,
MT_PHY_TYPE_VHT,
MT_PHY_TYPE_HE_SU = 8,
MT_PHY_TYPE_HE_EXT_SU,
MT_PHY_TYPE_HE_TB,
MT_PHY_TYPE_HE_MU,
__MT_PHY_TYPE_HE_MAX,
};
struct mt76_sta_stats {
u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
u64 tx_bw[4]; /* 20, 40, 80, 160 */
u64 tx_nss[4]; /* 1, 2, 3, 4 */
u64 tx_mcs[16]; /* mcs idx */
u64 tx_bytes;
u32 tx_packets;
u32 tx_retries;
u32 tx_failed;
};
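A minimal sketch of how one reported tx rate could be bucketed into the counters above (the helper name and its decoded arguments are illustrative assumptions; in the driver this accounting is done by mt76_connac2_mac_fill_txs further down in this series):

static void account_tx_rate(struct mt76_sta_stats *stats,
			    u8 mode, u8 bw, u8 nss, u8 mcs)
{
	if (mode < __MT_PHY_TYPE_HE_MAX)
		stats->tx_mode[mode]++;
	if (bw < ARRAY_SIZE(stats->tx_bw))
		stats->tx_bw[bw]++;	/* index 0..3 -> 20/40/80/160 MHz */
	if (nss && nss <= ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[nss - 1]++;
	if (mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[mcs]++;
}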
enum mt76_wcid_flags {
MT_WCID_FLAG_CHECK_PS,
MT_WCID_FLAG_PS,
......@@ -299,6 +323,8 @@ struct mt76_wcid {
struct list_head list;
struct idr pktid;
struct mt76_sta_stats stats;
};
struct mt76_txq {
......@@ -342,7 +368,8 @@ struct mt76_rx_tid {
#define MT_PACKET_ID_MASK GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK 0
#define MT_PACKET_ID_NO_SKB 1
#define MT_PACKET_ID_FIRST 2
#define MT_PACKET_ID_WED 2
#define MT_PACKET_ID_FIRST 3
#define MT_PACKET_ID_HAS_RATE BIT(7)
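/* MT_PACKET_ID_WED tags WED-offloaded frames: their tx status arrives in PPDU
 * format and is folded straight into mt76_sta_stats without looking up a
 * status skb (see mt7915_mac_add_txs below).
 */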
/* This is timer for when to give up when waiting for TXS callback,
* with starting time being the time at which the DMA_DONE callback
......@@ -527,7 +554,6 @@ struct mt76_usb {
struct mt76_reg_pair *rp;
int rp_len;
u32 base;
bool burst;
} mcu;
};
......@@ -815,26 +841,6 @@ struct mt76_power_limits {
s8 ru[7][12];
};
enum mt76_phy_type {
MT_PHY_TYPE_CCK,
MT_PHY_TYPE_OFDM,
MT_PHY_TYPE_HT,
MT_PHY_TYPE_HT_GF,
MT_PHY_TYPE_VHT,
MT_PHY_TYPE_HE_SU = 8,
MT_PHY_TYPE_HE_EXT_SU,
MT_PHY_TYPE_HE_TB,
MT_PHY_TYPE_HE_MU,
__MT_PHY_TYPE_HE_MAX,
};
struct mt76_sta_stats {
u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
u64 tx_bw[4]; /* 20, 40, 80, 160 */
u64 tx_nss[4]; /* 1, 2, 3, 4 */
u64 tx_mcs[16]; /* mcs idx */
};
struct mt76_ethtool_worker_info {
u64 *data;
int idx;
......
......@@ -1195,12 +1195,16 @@ static void mt7615_sta_set_decap_offload(struct ieee80211_hw *hw,
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
mt7615_mutex_acquire(dev);
if (enabled)
set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
mt7615_mcu_set_sta_decap_offload(dev, vif, sta);
mt7615_mutex_release(dev);
}
#ifdef CONFIG_PM
......
......@@ -83,6 +83,7 @@ static int mt7663s_probe(struct sdio_func *func,
.tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
.rx_skb = mt7615_queue_rx_skb,
.rx_check = mt7615_rx_check,
.sta_ps = mt7615_sta_ps,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
......@@ -180,7 +181,6 @@ static void mt7663s_remove(struct sdio_func *func)
mt76_free_device(&dev->mt76);
}
#ifdef CONFIG_PM
static int mt7663s_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
......@@ -235,28 +235,20 @@ static int mt7663s_resume(struct device *dev)
return err;
}
static const struct dev_pm_ops mt7663s_pm_ops = {
.suspend = mt7663s_suspend,
.resume = mt7663s_resume,
};
#endif
MODULE_DEVICE_TABLE(sdio, mt7663s_table);
MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_ROM_PATCH);
static DEFINE_SIMPLE_DEV_PM_OPS(mt7663s_pm_ops, mt7663s_suspend, mt7663s_resume);
static struct sdio_driver mt7663s_driver = {
.name = KBUILD_MODNAME,
.probe = mt7663s_probe,
.remove = mt7663s_remove,
.id_table = mt7663s_table,
#ifdef CONFIG_PM
.drv = {
.pm = &mt7663s_pm_ops,
}
#endif
.drv.pm = pm_sleep_ptr(&mt7663s_pm_ops),
};
module_sdio_driver(mt7663s_driver);
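The hunk above replaces the open-coded #ifdef CONFIG_PM handling: DEFINE_SIMPLE_DEV_PM_OPS() always defines the ops structure, and pm_sleep_ptr() resolves to NULL when CONFIG_PM_SLEEP is disabled, so the compiler can discard the unused callbacks without preprocessor guards (both macros come from <linux/pm.h>). A generic sketch of the idiom, with placeholder driver and callback names:

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};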
......
......@@ -119,6 +119,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
.tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
.rx_skb = mt7615_queue_rx_skb,
.rx_check = mt7615_rx_check,
.sta_ps = mt7615_sta_ps,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
......
......@@ -63,6 +63,12 @@ enum {
REPEATER_BSSID_MAX = 0x3f,
};
struct mt76_connac_reg_map {
u32 phys;
u32 maps;
u32 size;
};
struct mt76_connac_pm {
bool enable:1;
bool enable_user:1;
......@@ -348,9 +354,10 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed);
bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
__le32 *txs_data);
bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
int pid, __le32 *txs_data,
struct mt76_sta_stats *stats);
int pid, __le32 *txs_data);
void mt76_connac2_mac_decode_he_radiotap(struct mt76_dev *dev,
struct sk_buff *skb,
__le32 *rxv, u32 mode);
......
......@@ -158,6 +158,14 @@ enum {
#define MT_TXS4_TIMESTAMP GENMASK(31, 0)
/* PPDU based TXS */
#define MT_TXS5_MPDU_TX_BYTE GENMASK(22, 0)
#define MT_TXS5_MPDU_TX_CNT GENMASK(31, 23)
#define MT_TXS6_MPDU_FAIL_CNT GENMASK(31, 23)
#define MT_TXS7_MPDU_RETRY_CNT GENMASK(31, 23)
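/* These fields sit in TXS DWORDs 5-7 of a PPDU-format report
 * (MT_TXS0_TXS_FORMAT > 1) and feed the byte/packet/fail/retry counters
 * in mt76_sta_stats.
 */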
/* RXD DW1 */
#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
......
......@@ -490,6 +490,10 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS +
mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
/* counting non-offloading skbs */
wcid->stats.tx_bytes += skb->len;
wcid->stats.tx_packets++;
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
......@@ -550,35 +554,29 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
}
EXPORT_SYMBOL_GPL(mt76_connac2_mac_write_txwi);
bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
int pid, __le32 *txs_data,
struct mt76_sta_stats *stats)
bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
__le32 *txs_data)
{
struct mt76_sta_stats *stats = &wcid->stats;
struct ieee80211_supported_band *sband;
struct mt76_phy *mphy;
struct ieee80211_tx_info *info;
struct sk_buff_head list;
struct rate_info rate = {};
struct sk_buff *skb;
bool cck = false;
u32 txrate, txs, mode;
mt76_tx_status_lock(dev, &list);
skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
if (!skb)
goto out;
txs = le32_to_cpu(txs_data[0]);
info = IEEE80211_SKB_CB(skb);
if (!(txs & MT_TXS0_ACK_ERROR_MASK))
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ampdu_len = 1;
info->status.ampdu_ack_len = !!(info->flags &
IEEE80211_TX_STAT_ACK);
info->status.rates[0].idx = -1;
/* PPDU based reporting */
if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) {
stats->tx_bytes +=
le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE);
stats->tx_packets +=
le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT);
stats->tx_failed +=
le32_get_bits(txs_data[6], MT_TXS6_MPDU_FAIL_CNT);
stats->tx_retries +=
le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_CNT);
}
txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
......@@ -613,7 +611,7 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
if (rate.mcs > 31)
goto out;
return false;
rate.flags = RATE_INFO_FLAGS_MCS;
if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
......@@ -621,7 +619,7 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
break;
case MT_PHY_TYPE_VHT:
if (rate.mcs > 9)
goto out;
return false;
rate.flags = RATE_INFO_FLAGS_VHT_MCS;
break;
......@@ -630,14 +628,14 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
case MT_PHY_TYPE_HE_TB:
case MT_PHY_TYPE_HE_MU:
if (rate.mcs > 11)
goto out;
return false;
rate.he_gi = wcid->rate.he_gi;
rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
rate.flags = RATE_INFO_FLAGS_HE_MCS;
break;
default:
goto out;
return false;
}
stats->tx_mode[mode]++;
......@@ -662,10 +660,34 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
wcid->rate = rate;
out:
if (skb)
mt76_tx_status_skb_done(dev, skb, &list);
return true;
}
EXPORT_SYMBOL_GPL(mt76_connac2_mac_fill_txs);
bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
int pid, __le32 *txs_data)
{
struct sk_buff_head list;
struct sk_buff *skb;
mt76_tx_status_lock(dev, &list);
skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
if (skb) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool noacked = !(info->flags & IEEE80211_TX_STAT_ACK);
if (!(le32_to_cpu(txs_data[0]) & MT_TXS0_ACK_ERROR_MASK))
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ampdu_len = 1;
info->status.ampdu_ack_len = !noacked;
info->status.rates[0].idx = -1;
wcid->stats.tx_failed += noacked;
mt76_connac2_mac_fill_txs(dev, wcid, txs_data);
mt76_tx_status_skb_done(dev, skb, &list);
}
mt76_tx_status_unlock(dev, &list);
return !!skb;
......
......@@ -260,8 +260,10 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
if (sta_hdr)
le16_add_cpu(&sta_hdr->len, len);
if (sta_hdr) {
len += le16_to_cpu(sta_hdr->len);
sta_hdr->len = cpu_to_le16(len);
}
return ptlv;
}
......@@ -2648,7 +2650,7 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
/* SIFS 20us + 512 byte beacon tranmitted by 1Mbps (3906us) */
/* SIFS 20us + 512 byte beacon transmitted by 1Mbps (3906us) */
#define BCN_TX_ESTIMATE_TIME (4096 + 20)
void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif)
{
......@@ -2886,6 +2888,10 @@ int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
goto out;
}
snprintf(dev->hw->wiphy->fw_version,
sizeof(dev->hw->wiphy->fw_version),
"%.10s-%.15s", hdr->fw_ver, hdr->build_date);
release_firmware(fw);
if (!fw_wa)
......
......@@ -10,6 +10,7 @@
#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
#define FW_FEATURE_ENCRY_MODE BIT(4)
#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
#define FW_FEATURE_NON_DL BIT(6)
#define DL_MODE_ENCRYPT BIT(0)
#define DL_MODE_KEY_IDX GENMASK(2, 1)
......@@ -33,6 +34,12 @@
#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
enum {
FW_TYPE_DEFAULT = 0,
FW_TYPE_CLC = 2,
FW_TYPE_MAX_NUM = 255
};
#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
#define MCU_PKT_ID 0xa0
......@@ -174,7 +181,8 @@ struct mt76_connac2_fw_region {
__le32 addr;
__le32 len;
u8 feature_set;
u8 rsv1[15];
u8 type;
u8 rsv1[14];
} __packed;
struct tlv {
......@@ -1172,6 +1180,7 @@ enum {
MCU_CE_CMD_SET_ROC = 0x1c,
MCU_CE_CMD_SET_EDCA_PARMS = 0x1d,
MCU_CE_CMD_SET_P2P_OPPPS = 0x33,
MCU_CE_CMD_SET_CLC = 0x5c,
MCU_CE_CMD_SET_RATE_TX_POWER = 0x5d,
MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
MCU_CE_CMD_SCHED_SCAN_REQ = 0x62,
......
......@@ -21,29 +21,16 @@ static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
struct mt76_usb *usb = &dev->usb;
u32 reg, val;
int i;
if (usb->mcu.burst) {
WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);
reg = usb->mcu.rp[0].reg - usb->mcu.base;
for (i = 0; i < usb->mcu.rp_len; i++) {
val = get_unaligned_le32(data + 4 * i);
usb->mcu.rp[i].reg = reg++;
usb->mcu.rp[i].value = val;
}
} else {
WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);
for (i = 0; i < usb->mcu.rp_len; i++) {
reg = get_unaligned_le32(data + 8 * i) -
usb->mcu.base;
val = get_unaligned_le32(data + 8 * i + 4);
WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
usb->mcu.rp[i].value = val;
}
WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);
for (i = 0; i < usb->mcu.rp_len; i++) {
u32 reg = get_unaligned_le32(data + 8 * i) - usb->mcu.base;
u32 val = get_unaligned_le32(data + 8 * i + 4);
WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
usb->mcu.rp[i].value = val;
}
}
......@@ -207,7 +194,6 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
usb->mcu.rp = data;
usb->mcu.rp_len = n;
usb->mcu.base = base;
usb->mcu.burst = false;
ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);
......
......@@ -23,9 +23,9 @@ mt7915_implicit_txbf_set(void *data, u64 val)
{
struct mt7915_dev *dev = data;
if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
return -EBUSY;
/* The existing connected stations shall reconnect to apply
* new implicit txbf configuration.
*/
dev->ibf = !!val;
return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
......
......@@ -176,7 +176,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
/*
* We don't support reading GI info from txs packets.
* For accurate tx status reporting and AQL improvement,
we need to make sure that flags match so polling GI
* we need to make sure that flags match so polling GI
* from per-sta counters directly.
*/
rate = &msta->wcid.rate;
......@@ -232,7 +232,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
bool unicast, insert_ccmp_hdr = false;
u8 remove_pad, amsdu_info;
u8 mode = 0, qos_ctl = 0;
struct mt7915_sta *msta;
struct mt7915_sta *msta = NULL;
bool hdr_trans;
u16 hdr_gap;
u16 seq_ctrl = 0;
......@@ -1001,7 +1001,7 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
if (pid < MT_PACKET_ID_FIRST)
if (pid < MT_PACKET_ID_WED)
return;
if (wcidx >= mt7915_wtbl_size(dev))
......@@ -1015,8 +1015,11 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
msta = container_of(wcid, struct mt7915_sta, wcid);
mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data,
&msta->stats);
if (pid == MT_PACKET_ID_WED)
mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
else
mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
......@@ -1047,7 +1050,7 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
return false;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
mt7915_mac_add_txs(dev, rxd);
mt7915_mac_add_txs(dev, rxd);
return false;
case PKT_TYPE_RX_FW_MONITOR:
mt7915_debugfs_rx_fw_monitor(dev, data, len);
......@@ -1084,7 +1087,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
break;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
mt7915_mac_add_txs(dev, rxd);
mt7915_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
case PKT_TYPE_RX_FW_MONITOR:
......@@ -2071,8 +2074,9 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
}
flowid = ffs(~msta->twt.flowid_mask) - 1;
le16p_replace_bits(&twt_agrt->req_type, flowid,
IEEE80211_TWT_REQTYPE_FLOWID);
twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
twt_agrt->req_type |= le16_encode_bits(flowid,
IEEE80211_TWT_REQTYPE_FLOWID);
table_id = ffs(~dev->twt.table_mask) - 1;
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
......@@ -2122,8 +2126,9 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
unlock:
mutex_unlock(&dev->mt76.mutex);
out:
le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
IEEE80211_TWT_REQTYPE_SETUP_CMD);
twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
twt_agrt->req_type |=
le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
(twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}
......
......@@ -1010,6 +1010,23 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
}
sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
/* offloading flows bypass networking stack, so driver counts and
* reports sta statistics via NL80211_STA_INFO when WED is active.
*/
if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
sinfo->tx_bytes = msta->wcid.stats.tx_bytes;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
sinfo->tx_packets = msta->wcid.stats.tx_packets;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
sinfo->tx_failed = msta->wcid.stats.tx_failed;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
sinfo->tx_retries = msta->wcid.stats.tx_retries;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
}
}
static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta)
......@@ -1224,7 +1241,7 @@ static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
mt76_ethtool_worker(wi, &msta->stats);
mt76_ethtool_worker(wi, &msta->wcid.stats);
}
static
......
......@@ -1360,7 +1360,7 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
struct sta_phy phy = {};
int ret, nrates = 0;
#define __sta_phy_bitrate_mask_check(_mcs, _gi, _he) \
#define __sta_phy_bitrate_mask_check(_mcs, _gi, _ht, _he) \
do { \
u8 i, gi = mask->control[band]._gi; \
gi = (_he) ? gi : gi == NL80211_TXRATE_FORCE_SGI; \
......@@ -1373,15 +1373,17 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
continue; \
nrates += hweight16(mask->control[band]._mcs[i]); \
phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \
if (_ht) \
phy.mcs += 8 * i; \
} \
} while (0)
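/* With _ht set, ht_mcs[] is indexed per spatial stream, so the selected MCS is
 * offset by 8 * i; e.g. ht_mcs[1] = 0x80 resolves to MCS 15 (7 + 8 * 1).
 */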
if (sta->deflink.he_cap.has_he) {
__sta_phy_bitrate_mask_check(he_mcs, he_gi, 1);
__sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1);
} else if (sta->deflink.vht_cap.vht_supported) {
__sta_phy_bitrate_mask_check(vht_mcs, gi, 0);
__sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0);
} else if (sta->deflink.ht_cap.ht_supported) {
__sta_phy_bitrate_mask_check(ht_mcs, gi, 0);
__sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0);
} else {
nrates = hweight32(mask->control[band].legacy);
phy.mcs = ffs(mask->control[band].legacy) - 1;
......
......@@ -127,8 +127,6 @@ struct mt7915_sta {
unsigned long jiffies;
unsigned long ampdu_state;
struct mt76_sta_stats stats;
struct mt76_connac_sta_key_conf bip;
struct {
......
......@@ -99,6 +99,7 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
struct mt7915_phy *phy;
int ret;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
......@@ -112,18 +113,38 @@ static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
if (!ret)
return -EAGAIN;
phy = &dev->phy;
mt76_set(dev, MT_AGG_ACR4(phy->band_idx), MT_AGG_ACR_PPDU_TXS2H);
phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
if (phy)
mt76_set(dev, MT_AGG_ACR4(phy->band_idx),
MT_AGG_ACR_PPDU_TXS2H);
return 0;
}
static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
struct mt7915_phy *phy;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
spin_lock_bh(&dev->mt76.token_lock);
dev->mt76.token_size = MT7915_TOKEN_SIZE;
spin_unlock_bh(&dev->mt76.token_lock);
/* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than
* MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set.
*/
phy = &dev->phy;
mt76_clear(dev, MT_AGG_ACR4(phy->band_idx), MT_AGG_ACR_PPDU_TXS2H);
phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
if (phy)
mt76_clear(dev, MT_AGG_ACR4(phy->band_idx),
MT_AGG_ACR_PPDU_TXS2H);
}
#endif
......
......@@ -4,17 +4,11 @@
#ifndef __MT7915_REGS_H
#define __MT7915_REGS_H
struct __map {
u32 phys;
u32 maps;
u32 size;
};
/* used to differentiate between generations */
struct mt7915_reg_desc {
const u32 *reg_rev;
const u32 *offs_rev;
const struct __map *map;
const struct mt76_connac_reg_map *map;
u32 map_size;
};
......@@ -52,6 +46,7 @@ enum offs_rev {
AGG_AWSCR0,
AGG_PCR0,
AGG_ACR0,
AGG_ACR4,
AGG_MRCR,
AGG_ATCR1,
AGG_ATCR3,
......@@ -471,6 +466,9 @@ enum offs_rev {
#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16)
#define MT_AGG_ACR4(_band) MT_WF_AGG(_band, __OFFS(AGG_ACR4))
#define MT_AGG_ACR_PPDU_TXS2H BIT(1)
#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, __OFFS(AGG_MRCR))
#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
......
......@@ -13,6 +13,7 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
acpi_handle root, handle;
acpi_status status;
u32 i = 0;
int ret;
root = ACPI_HANDLE(mdev->dev);
if (!root)
......@@ -52,9 +53,11 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
*(*tbl + i) = (u8)sar_unit->integer.value;
}
free:
ret = (i == sar_root->package.count) ? 0 : -EINVAL;
kfree(sar_root);
return (i == sar_root->package.count) ? 0 : -EINVAL;
return ret;
}
/* MTCL : Country List Table for 6G band */
......
......@@ -11,12 +11,15 @@ enum mt7921_eeprom_field {
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
MT_EE_WIFI_CONF = 0x07c,
__MT_EE_MAX = 0x3bf
MT_EE_HW_TYPE = 0x55b,
__MT_EE_MAX = 0x9ff
};
#define MT_EE_WIFI_CONF_TX_MASK BIT(0)
#define MT_EE_WIFI_CONF_BAND_SEL GENMASK(3, 2)
#define MT_EE_HW_TYPE_ENCAP BIT(0)
enum mt7921_eeprom_band {
MT_EE_NA,
MT_EE_5GHZ,
......
......@@ -39,6 +39,7 @@ mt7921_regd_notifier(struct wiphy *wiphy,
dev->mt76.region = request->dfs_region;
mt7921_mutex_acquire(dev);
mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env);
mt76_connac_mcu_set_channel_domain(hw->priv);
mt7921_set_tx_sar_pwr(hw, NULL);
mt7921_mutex_release(dev);
......
......@@ -235,7 +235,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
struct mt7921_sta *msta;
struct mt7921_sta *msta = NULL;
u16 seq_ctrl = 0;
__le16 fc = 0;
u8 mode = 0;
......@@ -486,7 +486,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
return 0;
}
void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
static void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
struct mt7921_sta *msta;
u16 fc, tid;
......@@ -509,7 +509,6 @@ void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
if (!test_and_set_bit(tid, &msta->ampdu_state))
ieee80211_start_tx_ba_session(sta, tid, 0);
}
EXPORT_SYMBOL_GPL(mt7921_tx_check_aggr);
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
{
......@@ -539,8 +538,7 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
msta = container_of(wcid, struct mt7921_sta, wcid);
mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data,
&msta->stats);
mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
......@@ -552,7 +550,134 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
out:
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt7921_mac_add_txs);
void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
struct ieee80211_sta *sta, bool clear_status,
struct list_head *free_list)
{
struct mt76_dev *mdev = &dev->mt76;
__le32 *txwi;
u16 wcid_idx;
mt76_connac_txp_skb_unmap(mdev, t);
if (!t->skb)
goto out;
txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
if (sta) {
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
mt7921_tx_check_aggr(sta, txwi);
wcid_idx = wcid->idx;
} else {
wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
}
__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
out:
t->skb = NULL;
mt76_put_txwi(mdev, t);
}
EXPORT_SYMBOL_GPL(mt7921_txwi_free);
static void mt7921_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
{
struct mt76_connac_tx_free *free = data;
__le32 *tx_info = (__le32 *)(data + sizeof(*free));
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb, *tmp;
void *end = data + len;
LIST_HEAD(free_list);
bool wake = false;
u8 i, count;
/* clean DMA queues and unmap buffers first */
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
if (WARN_ON_ONCE((void *)&tx_info[count] > end))
return;
for (i = 0; i < count; i++) {
u32 msdu, info = le32_to_cpu(tx_info[i]);
u8 stat;
/* 1'b1: new wcid pair.
* 1'b0: msdu_id with the same 'wcid pair' as above.
*/
if (info & MT_TX_FREE_PAIR) {
struct mt7921_sta *msta;
struct mt76_wcid *wcid;
u16 idx;
count++;
idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
wcid = rcu_dereference(dev->mt76.wcid[idx]);
sta = wcid_to_sta(wcid);
if (!sta)
continue;
msta = container_of(wcid, struct mt7921_sta, wcid);
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&msta->poll_list))
list_add_tail(&msta->poll_list, &dev->sta_poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
continue;
}
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
stat = FIELD_GET(MT_TX_FREE_STATUS, info);
txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
continue;
mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
}
if (wake)
mt76_set_tx_blocked(&dev->mt76, false);
list_for_each_entry_safe(skb, tmp, &free_list, list) {
skb_list_del_init(skb);
napi_consume_skb(skb, 1);
}
rcu_read_lock();
mt7921_mac_sta_poll(dev);
rcu_read_unlock();
mt76_worker_schedule(&dev->mt76.tx_worker);
}
bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
__le32 *rxd = (__le32 *)data;
__le32 *end = (__le32 *)&rxd[len / 4];
enum rx_pkt_type type;
type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
mt7921_mac_tx_free(dev, data, len); /* mmio */
return false;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
mt7921_mac_add_txs(dev, rxd);
return false;
default:
return true;
}
}
EXPORT_SYMBOL_GPL(mt7921_rx_check);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
......@@ -570,6 +695,11 @@ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
type = PKT_TYPE_NORMAL_MCU;
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
mt7921_mac_tx_free(dev, skb->data, skb->len);
napi_consume_skb(skb, 1);
break;
case PKT_TYPE_RX_EVENT:
mt7921_mcu_rx_event(dev, skb);
break;
......@@ -780,6 +910,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
void mt7921_reset(struct mt76_dev *mdev)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct mt76_connac_pm *pm = &dev->pm;
if (!dev->hw_init_done)
return;
......@@ -787,8 +918,12 @@ void mt7921_reset(struct mt76_dev *mdev)
if (dev->hw_full_reset)
return;
if (pm->suspended)
return;
queue_work(dev->mt76.wq, &dev->reset_work);
}
EXPORT_SYMBOL_GPL(mt7921_reset);
void mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
{
......
......@@ -752,6 +752,7 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7921_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
......@@ -1045,7 +1046,7 @@ mt7921_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
mt76_ethtool_worker(wi, &msta->stats);
mt76_ethtool_worker(wi, &msta->wcid.stats);
}
static
......@@ -1404,6 +1405,8 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
struct mt7921_dev *dev = mt7921_hw_dev(hw);
mt7921_mutex_acquire(dev);
if (enabled)
set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
else
......@@ -1411,6 +1414,8 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid,
MCU_UNI_CMD(STA_REC_UPDATE));
mt7921_mutex_release(dev);
}
#if IS_ENABLED(CONFIG_IPV6)
......@@ -1526,17 +1531,23 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7921_dev *dev = mt7921_hw_dev(hw);
int err;
mt7921_mutex_acquire(dev);
err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
true);
if (err)
return err;
goto out;
err = mt7921_mcu_set_bss_pm(dev, vif, true);
if (err)
return err;
goto out;
err = mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_NONE);
out:
mt7921_mutex_release(dev);
return mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_NONE);
return err;
}
static void
......@@ -1548,11 +1559,16 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7921_dev *dev = mt7921_hw_dev(hw);
int err;
mt7921_mutex_acquire(dev);
err = mt7921_mcu_set_bss_pm(dev, vif, false);
if (err)
return;
goto out;
mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false);
out:
mt7921_mutex_release(dev);
}
const struct ieee80211_ops mt7921_ops = {
......
......@@ -2,14 +2,20 @@
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/fs.h>
#include <linux/firmware.h>
#include "mt7921.h"
#include "mt7921_trace.h"
#include "eeprom.h"
#include "mcu.h"
#include "mac.h"
#define MT_STA_BFER BIT(0)
#define MT_STA_BFEE BIT(1)
static bool mt7921_disable_clc;
module_param_named(disable_clc, mt7921_disable_clc, bool, 0644);
MODULE_PARM_DESC(disable_clc, "disable CLC support");
static int
mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
{
......@@ -84,6 +90,27 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
}
EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response);
static int mt7921_mcu_read_eeprom(struct mt7921_dev *dev, u32 offset, u8 *val)
{
struct mt7921_mcu_eeprom_info *res, req = {
.addr = cpu_to_le32(round_down(offset,
MT7921_EEPROM_BLOCK_SIZE)),
};
struct sk_buff *skb;
int ret;
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS),
&req, sizeof(req), true, &skb);
if (ret)
return ret;
res = (struct mt7921_mcu_eeprom_info *)skb->data;
*val = res->data[offset % MT7921_EEPROM_BLOCK_SIZE];
dev_kfree_skb(skb);
return 0;
}
#ifdef CONFIG_PM
static int
......@@ -354,6 +381,90 @@ static char *mt7921_ram_name(struct mt7921_dev *dev)
return ret;
}
static int mt7921_load_clc(struct mt7921_dev *dev, const char *fw_name)
{
const struct mt76_connac2_fw_trailer *hdr;
const struct mt76_connac2_fw_region *region;
const struct mt7921_clc *clc;
struct mt76_dev *mdev = &dev->mt76;
struct mt7921_phy *phy = &dev->phy;
const struct firmware *fw;
int ret, i, len, offset = 0;
u8 *clc_base = NULL, hw_encap = 0;
if (mt7921_disable_clc ||
mt76_is_usb(&dev->mt76))
return 0;
if (mt76_is_mmio(&dev->mt76)) {
ret = mt7921_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
if (ret)
return ret;
hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
}
ret = request_firmware(&fw, fw_name, mdev->dev);
if (ret)
return ret;
if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
dev_err(mdev->dev, "Invalid firmware\n");
ret = -EINVAL;
goto out;
}
hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
for (i = 0; i < hdr->n_region; i++) {
region = (const void *)((const u8 *)hdr -
(hdr->n_region - i) * sizeof(*region));
len = le32_to_cpu(region->len);
/* check if we have valid buffer size */
if (offset + len > fw->size) {
dev_err(mdev->dev, "Invalid firmware region\n");
ret = -EINVAL;
goto out;
}
if ((region->feature_set & FW_FEATURE_NON_DL) &&
region->type == FW_TYPE_CLC) {
clc_base = (u8 *)(fw->data + offset);
break;
}
offset += len;
}
if (!clc_base)
goto out;
for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
clc = (const struct mt7921_clc *)(clc_base + offset);
/* do not init buf again if chip reset triggered */
if (phy->clc[clc->idx])
continue;
/* header content sanity */
if (clc->idx == MT7921_CLC_POWER &&
u8_get_bits(clc->type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
continue;
phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
le32_to_cpu(clc->len),
GFP_KERNEL);
if (!phy->clc[clc->idx]) {
ret = -ENOMEM;
goto out;
}
}
ret = mt7921_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
out:
release_firmware(fw);
return ret;
}
static int mt7921_load_firmware(struct mt7921_dev *dev)
{
int ret;
......@@ -423,6 +534,10 @@ int mt7921_run_firmware(struct mt7921_dev *dev)
return err;
set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
err = mt7921_load_clc(dev, mt7921_ram_name(dev));
if (err)
return err;
return mt7921_mcu_fw_log_2_host(dev, 1);
}
EXPORT_SYMBOL_GPL(mt7921_run_firmware);
......@@ -930,3 +1045,86 @@ mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
&req, sizeof(req), true);
}
static
int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
enum environment_cap env_cap,
struct mt7921_clc *clc,
u8 idx)
{
struct sk_buff *skb;
struct {
u8 ver;
u8 pad0;
__le16 len;
u8 idx;
u8 env;
u8 pad1[2];
u8 alpha2[2];
u8 type[2];
u8 rsvd[64];
} __packed req = {
.idx = idx,
.env = env_cap,
};
int ret, valid_cnt = 0;
u8 i, *pos;
if (!clc)
return 0;
pos = clc->data;
for (i = 0; i < clc->nr_country; i++) {
struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
u16 len = le16_to_cpu(rule->len);
pos += len + sizeof(*rule);
if (rule->alpha2[0] != alpha2[0] ||
rule->alpha2[1] != alpha2[1])
continue;
memcpy(req.alpha2, rule->alpha2, 2);
memcpy(req.type, rule->type, 2);
req.len = cpu_to_le16(sizeof(req) + len);
skb = __mt76_mcu_msg_alloc(&dev->mt76, &req,
le16_to_cpu(req.len),
sizeof(req), GFP_KERNEL);
if (!skb)
return -ENOMEM;
skb_put_data(skb, rule->data, len);
ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_CE_CMD(SET_CLC), false);
if (ret < 0)
return ret;
valid_cnt++;
}
if (!valid_cnt)
return -ENOENT;
return 0;
}
int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
enum environment_cap env_cap)
{
struct mt7921_phy *phy = (struct mt7921_phy *)&dev->phy;
int i, ret;
/* submit all clc config */
for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
ret = __mt7921_mcu_set_clc(dev, alpha2, env_cap,
phy->clc[i], i);
/* If no country found, set "00" as default */
if (ret == -ENOENT)
ret = __mt7921_mcu_set_clc(dev, "00",
ENVIRON_INDOOR,
phy->clc[i], i);
if (ret < 0)
return ret;
}
return 0;
}
......@@ -41,7 +41,7 @@ enum {
struct mt7921_mcu_eeprom_info {
__le32 addr;
__le32 valid;
u8 data[16];
u8 data[MT7921_EEPROM_BLOCK_SIZE];
} __packed;
#define MT_RA_RATE_NSS GENMASK(8, 6)
......
......@@ -41,6 +41,8 @@
#define MT7921_EEPROM_SIZE 3584
#define MT7921_TOKEN_SIZE 8192
#define MT7921_EEPROM_BLOCK_SIZE 16
#define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
......@@ -100,7 +102,6 @@ struct mt7921_sta {
unsigned long last_txs;
unsigned long ampdu_state;
struct mt76_sta_stats stats;
struct mt76_connac_sta_key_conf bip;
};
......@@ -149,6 +150,29 @@ struct mib_stats {
u32 tx_amsdu_cnt;
};
enum {
MT7921_CLC_POWER,
MT7921_CLC_CHAN,
MT7921_CLC_MAX_NUM,
};
struct mt7921_clc_rule {
u8 alpha2[2];
u8 type[2];
__le16 len;
u8 data[];
} __packed;
struct mt7921_clc {
__le32 len;
u8 idx;
u8 ver;
u8 nr_country;
u8 type;
u8 rsv[8];
u8 data[];
};
struct mt7921_phy {
struct mt76_phy *mt76;
struct mt7921_dev *dev;
......@@ -174,6 +198,8 @@ struct mt7921_phy {
#ifdef CONFIG_ACPI
struct mt7921_acpi_sar *acpisar;
#endif
struct mt7921_clc *clc[MT7921_CLC_MAX_NUM];
};
#define mt7921_init_reset(dev) ((dev)->hif_ops->init_reset(dev))
......@@ -380,6 +406,7 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7921_tx_worker(struct mt76_worker *w);
void mt7921_tx_token_put(struct mt7921_dev *dev);
bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
......@@ -410,14 +437,13 @@ int mt7921_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt7921_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
struct netlink_callback *cb, void *data, int len);
void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi);
void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
struct ieee80211_sta *sta, bool clear_status,
struct list_head *free_list);
void mt7921_mac_sta_poll(struct mt7921_dev *dev);
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
int mt7921e_driver_own(struct mt7921_dev *dev);
int mt7921e_mac_reset(struct mt7921_dev *dev);
int mt7921e_mcu_init(struct mt7921_dev *dev);
......@@ -479,4 +505,7 @@ mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
#endif
int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
enum environment_cap env_cap);
#endif
......@@ -123,54 +123,51 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
{
static const struct {
u32 phys;
u32 mapped;
u32 size;
} fixed_map[] = {
static const struct mt76_connac_reg_map fixed_map[] = {
{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
{ 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
{ 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
{ 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
{ 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
{ 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
{ 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
{ 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
{ 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
{ 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
{ 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
{ 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x54000000, 0x02000, 0x01000 }, /* WFDMA PCIE0 MCU DMA0 */
{ 0x55000000, 0x03000, 0x01000 }, /* WFDMA PCIE0 MCU DMA1 */
{ 0x58000000, 0x06000, 0x01000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
{ 0x59000000, 0x07000, 0x01000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
{ 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
{ 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
{ 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
{ 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
{ 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
{ 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
{ 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
{ 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
{ 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
{ 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
{ 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
{ 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
{ 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
{ 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
{ 0x820cc000, 0x0e000, 0x01000 }, /* WF_UMAC_TOP (PP) */
{ 0x820cd000, 0x0f000, 0x01000 }, /* WF_MDP_TOP */
{ 0x74030000, 0x10000, 0x10000 }, /* PCIE_MAC_IREG */
{ 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
{ 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
{ 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
{ 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
{ 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
{ 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
{ 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
{ 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
{ 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
{ 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
{ 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
{ 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
{ 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
{ 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
{ 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
{ 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
{ 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
{ 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
{ 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
};
int i;
......@@ -187,7 +184,7 @@ static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
if (ofs > fixed_map[i].size)
continue;
return fixed_map[i].mapped + ofs;
return fixed_map[i].maps + ofs;
}
if ((addr >= 0x18000000 && addr < 0x18c00000) ||
......@@ -238,8 +235,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.token_size = MT7921_TOKEN_SIZE,
.tx_prepare_skb = mt7921e_tx_prepare_skb,
.tx_complete_skb = mt76_connac_tx_complete_skb,
.rx_check = mt7921e_rx_check,
.rx_skb = mt7921e_queue_rx_skb,
.rx_check = mt7921_rx_check,
.rx_skb = mt7921_queue_rx_skb,
.rx_poll_complete = mt7921_rx_poll_complete,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
......@@ -288,6 +285,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
goto err_free_pci_vec;
}
pci_set_drvdata(pdev, mdev);
dev = container_of(mdev, struct mt7921_dev, mt76);
dev->hif_ops = &mt7921_pcie_ops;
......@@ -367,6 +366,7 @@ static int mt7921_pci_suspend(struct device *device)
int i, err;
pm->suspended = true;
flush_work(&dev->reset_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
......@@ -409,9 +409,6 @@ static int mt7921_pci_suspend(struct device *device)
if (err)
goto restore_napi;
if (err)
goto restore_napi;
return 0;
restore_napi:
......@@ -428,6 +425,9 @@ static int mt7921_pci_suspend(struct device *device)
restore_suspend:
pm->suspended = false;
if (err < 0)
mt7921_reset(&dev->mt76);
return err;
}
......@@ -441,7 +441,7 @@ static int mt7921_pci_resume(struct device *device)
err = mt7921_mcu_drv_pmctrl(dev);
if (err < 0)
return err;
goto failed;
mt7921_wpdma_reinit_cond(dev);
......@@ -471,11 +471,12 @@ static int mt7921_pci_resume(struct device *device)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
err = mt76_connac_mcu_set_hif_suspend(mdev, false);
if (err)
return err;
failed:
pm->suspended = false;
if (err < 0)
mt7921_reset(&dev->mt76);
return err;
}
......
......@@ -53,154 +53,6 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return 0;
}
static void
mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
struct ieee80211_sta *sta, bool clear_status,
struct list_head *free_list)
{
struct mt76_dev *mdev = &dev->mt76;
__le32 *txwi;
u16 wcid_idx;
mt76_connac_txp_skb_unmap(mdev, t);
if (!t->skb)
goto out;
txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
if (sta) {
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
mt7921_tx_check_aggr(sta, txwi);
wcid_idx = wcid->idx;
} else {
wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
}
__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
out:
t->skb = NULL;
mt76_put_txwi(mdev, t);
}
static void
mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
{
struct mt76_connac_tx_free *free = data;
__le32 *tx_info = (__le32 *)(data + sizeof(*free));
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb, *tmp;
void *end = data + len;
LIST_HEAD(free_list);
bool wake = false;
u8 i, count;
/* clean DMA queues and unmap buffers first */
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
if (WARN_ON_ONCE((void *)&tx_info[count] > end))
return;
for (i = 0; i < count; i++) {
u32 msdu, info = le32_to_cpu(tx_info[i]);
u8 stat;
/* 1'b1: new wcid pair.
* 1'b0: msdu_id with the same 'wcid pair' as above.
*/
if (info & MT_TX_FREE_PAIR) {
struct mt7921_sta *msta;
struct mt76_wcid *wcid;
u16 idx;
count++;
idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
wcid = rcu_dereference(dev->mt76.wcid[idx]);
sta = wcid_to_sta(wcid);
if (!sta)
continue;
msta = container_of(wcid, struct mt7921_sta, wcid);
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&msta->poll_list))
list_add_tail(&msta->poll_list, &dev->sta_poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
continue;
}
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
stat = FIELD_GET(MT_TX_FREE_STATUS, info);
txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
continue;
mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
}
if (wake)
mt76_set_tx_blocked(&dev->mt76, false);
list_for_each_entry_safe(skb, tmp, &free_list, list) {
skb_list_del_init(skb);
napi_consume_skb(skb, 1);
}
rcu_read_lock();
mt7921_mac_sta_poll(dev);
rcu_read_unlock();
mt76_worker_schedule(&dev->mt76.tx_worker);
}
bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
__le32 *rxd = (__le32 *)data;
__le32 *end = (__le32 *)&rxd[len / 4];
enum rx_pkt_type type;
type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
mt7921e_mac_tx_free(dev, data, len);
return false;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
mt7921_mac_add_txs(dev, rxd);
return false;
default:
return true;
}
}
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
__le32 *rxd = (__le32 *)skb->data;
enum rx_pkt_type type;
type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
mt7921e_mac_tx_free(dev, skb->data, skb->len);
napi_consume_skb(skb, 1);
break;
default:
mt7921_queue_rx_skb(mdev, q, skb);
break;
}
}
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
struct mt76_txwi_cache *txwi;
......
......@@ -30,12 +30,7 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
return ret;
if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
cmd == MCU_UNI_CMD(SUSPEND) ||
cmd == MCU_UNI_CMD(OFFLOAD))
mdev->mcu.timeout = HZ;
else
mdev->mcu.timeout = 3 * HZ;
mdev->mcu.timeout = 3 * HZ;
if (cmd == MCU_CMD(FW_SCATTER))
txq = MT_MCUQ_FWDL;
......@@ -59,6 +54,8 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
if (err)
return err;
mt76_rmw_field(dev, MT_PCIE_MAC_PM, MT_PCIE_MAC_PM_L0S_DIS, 1);
err = mt7921_run_firmware(dev);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
......
......@@ -440,6 +440,8 @@
#define MT_PCIE_MAC_BASE 0x10000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
#define MT_PCIE_MAC_PM MT_PCIE_MAC(0x194)
#define MT_PCIE_MAC_PM_L0S_DIS BIT(8)
#define MT_DMA_SHDL(ofs) (0x7c026000 + (ofs))
#define MT_DMASHDL_SW_CONTROL MT_DMA_SHDL(0x004)
......
......@@ -96,6 +96,7 @@ static int mt7921s_probe(struct sdio_func *func,
.tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
.tx_status_data = mt7921_usb_sdio_tx_status_data,
.rx_skb = mt7921_queue_rx_skb,
.rx_check = mt7921_rx_check,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
.sta_assoc = mt7921_mac_sta_assoc,
......@@ -194,7 +195,6 @@ static void mt7921s_remove(struct sdio_func *func)
mt7921s_unregister_device(dev);
}
#ifdef CONFIG_PM
static int mt7921s_suspend(struct device *__dev)
{
struct sdio_func *func = dev_to_sdio_func(__dev);
......@@ -206,6 +206,7 @@ static int mt7921s_suspend(struct device *__dev)
pm->suspended = true;
set_bit(MT76_STATE_SUSPEND, &mdev->phy.state);
flush_work(&dev->reset_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
......@@ -261,6 +262,9 @@ static int mt7921s_suspend(struct device *__dev)
clear_bit(MT76_STATE_SUSPEND, &mdev->phy.state);
pm->suspended = false;
if (err < 0)
mt7921_reset(&dev->mt76);
return err;
}
......@@ -276,7 +280,7 @@ static int mt7921s_resume(struct device *__dev)
err = mt7921_mcu_drv_pmctrl(dev);
if (err < 0)
return err;
goto failed;
mt76_worker_enable(&mdev->tx_worker);
mt76_worker_enable(&mdev->sdio.txrx_worker);
......@@ -288,34 +292,27 @@ static int mt7921s_resume(struct device *__dev)
mt76_connac_mcu_set_deep_sleep(mdev, false);
err = mt76_connac_mcu_set_hif_suspend(mdev, false);
if (err)
return err;
failed:
pm->suspended = false;
if (err < 0)
mt7921_reset(&dev->mt76);
return err;
}
static const struct dev_pm_ops mt7921s_pm_ops = {
.suspend = mt7921s_suspend,
.resume = mt7921s_resume,
};
#endif
MODULE_DEVICE_TABLE(sdio, mt7921s_table);
MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
MODULE_FIRMWARE(MT7921_ROM_PATCH);
static DEFINE_SIMPLE_DEV_PM_OPS(mt7921s_pm_ops, mt7921s_suspend, mt7921s_resume);
static struct sdio_driver mt7921s_driver = {
.name = KBUILD_MODNAME,
.probe = mt7921s_probe,
.remove = mt7921s_remove,
.id_table = mt7921s_table,
#ifdef CONFIG_PM
.drv = {
.pm = &mt7921s_pm_ops,
}
#endif
.drv.pm = pm_sleep_ptr(&mt7921s_pm_ops),
};
module_sdio_driver(mt7921s_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
......
......@@ -33,12 +33,7 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
return ret;
if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
cmd == MCU_UNI_CMD(SUSPEND) ||
cmd == MCU_UNI_CMD(OFFLOAD))
mdev->mcu.timeout = HZ;
else
mdev->mcu.timeout = 3 * HZ;
mdev->mcu.timeout = 3 * HZ;
if (cmd == MCU_CMD(FW_SCATTER))
type = MT7921_SDIO_FWDL;
......
......@@ -106,12 +106,7 @@ mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
return ret;
if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
cmd == MCU_UNI_CMD(SUSPEND) ||
cmd == MCU_UNI_CMD(OFFLOAD))
mdev->mcu.timeout = HZ;
else
mdev->mcu.timeout = 3 * HZ;
mdev->mcu.timeout = 3 * HZ;
if (cmd != MCU_CMD(FW_SCATTER))
ep = MT_EP_OUT_INBAND_CMD;
......@@ -183,6 +178,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
.tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
.tx_status_data = mt7921_usb_sdio_tx_status_data,
.rx_skb = mt7921_queue_rx_skb,
.rx_check = mt7921_rx_check,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
.sta_assoc = mt7921_mac_sta_assoc,
......@@ -300,23 +296,34 @@ static void mt7921u_disconnect(struct usb_interface *usb_intf)
static int mt7921u_suspend(struct usb_interface *intf, pm_message_t state)
{
struct mt7921_dev *dev = usb_get_intfdata(intf);
struct mt76_connac_pm *pm = &dev->pm;
int err;
pm->suspended = true;
flush_work(&dev->reset_work);
err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
if (err)
return err;
goto failed;
mt76u_stop_rx(&dev->mt76);
mt76u_stop_tx(&dev->mt76);
set_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
return 0;
failed:
pm->suspended = false;
if (err < 0)
mt7921_reset(&dev->mt76);
return err;
}
static int mt7921u_resume(struct usb_interface *intf)
{
struct mt7921_dev *dev = usb_get_intfdata(intf);
struct mt76_connac_pm *pm = &dev->pm;
bool reinit = true;
int err, i;
......@@ -338,16 +345,21 @@ static int mt7921u_resume(struct usb_interface *intf)
if (reinit || mt7921_dma_need_reinit(dev)) {
err = mt7921u_dma_init(dev, true);
if (err)
return err;
goto failed;
}
clear_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
err = mt76u_resume_rx(&dev->mt76);
if (err < 0)
return err;
goto failed;
err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
failed:
pm->suspended = false;
if (err < 0)
mt7921_reset(&dev->mt76);
return mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
return err;
}
#endif /* CONFIG_PM */
......
......@@ -478,14 +478,14 @@ static void mt76s_status_worker(struct mt76_worker *w)
if (ndata_frames > 0)
resched = true;
if (dev->drv->tx_status_data &&
if (dev->drv->tx_status_data && ndata_frames > 0 &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
!test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
queue_work(dev->wq, &dev->sdio.stat_work);
ieee80211_queue_work(dev->hw, &dev->sdio.stat_work);
} while (nframes > 0);
if (resched)
mt76_worker_schedule(&dev->sdio.txrx_worker);
mt76_worker_schedule(&dev->tx_worker);
}
static void mt76s_tx_status_data(struct work_struct *work)
......@@ -508,7 +508,7 @@ static void mt76s_tx_status_data(struct work_struct *work)
}
if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
queue_work(dev->wq, &sdio->stat_work);
ieee80211_queue_work(dev->hw, &sdio->stat_work);
else
clear_bit(MT76_READING_STATS, &dev->phy.state);
}
......
......@@ -85,7 +85,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
struct mt76_sdio *sdio = &dev->sdio;
int len = 0, err, i;
struct page *page;
u8 *buf;
u8 *buf, *end;
for (i = 0; i < intr->rx.num[qid]; i++)
len += round_up(intr->rx.len[qid][i] + 4, 4);
......@@ -112,20 +112,29 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
return err;
}
for (i = 0; i < intr->rx.num[qid]; i++) {
end = buf + len;
i = 0;
while (i < intr->rx.num[qid] && buf < end) {
int index = (q->head + i) % q->ndesc;
struct mt76_queue_entry *e = &q->entry[index];
__le32 *rxd = (__le32 *)buf;
/* parse rxd to get the actual packet length */
len = le32_get_bits(rxd[0], GENMASK(15, 0));
e->skb = mt76s_build_rx_skb(buf, len, round_up(len + 4, 4));
if (!e->skb)
break;
/* Optimized path for TXS */
if (!dev->drv->rx_check || dev->drv->rx_check(dev, buf, len)) {
e->skb = mt76s_build_rx_skb(buf, len,
round_up(len + 4, 4));
if (!e->skb)
break;
if (q->queued + i + 1 == q->ndesc)
break;
i++;
}
buf += round_up(len + 4, 4);
if (q->queued + i + 1 == q->ndesc)
break;
}
put_page(page);
......
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
#include <linux/random.h>
#include "mt76.h"
const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
......@@ -123,12 +125,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
if (!head)
return -ENOMEM;
hdr = __skb_put_zero(head, head_len);
hdr = __skb_put_zero(head, sizeof(*hdr));
hdr->frame_control = cpu_to_le16(fc);
memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
skb_set_queue_mapping(head, IEEE80211_AC_BE);
get_random_bytes(__skb_put(head, head_len - sizeof(*hdr)),
head_len - sizeof(*hdr));
info = IEEE80211_SKB_CB(head);
info->flags = IEEE80211_TX_CTL_INJECTED |
......@@ -154,7 +158,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
return -ENOMEM;
}
__skb_put_zero(frag, frag_len);
get_random_bytes(__skb_put(frag, frag_len), frag_len);
head->len += frag->len;
head->data_len += frag->len;
......
......@@ -528,6 +528,11 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
data_len = min_t(int, len, data_len - head_room);
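/* let the driver consume status frames (e.g. TXS) in place, so no
 * rx skb has to be built for them
 */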
if (len == data_len &&
dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len))
return 0;
skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
if (!skb)
return 0;
......