Commit e9ab0b2e authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2022-11-28' of...

Merge tag 'iwlwifi-next-for-kalle-2022-11-28' of http://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

This is the second pull request intended for v6.2

It contains two patch sets sent out earlier, with the following content:
* iwlwifi EHT adjustments
* double-free fix in tx path
* iwlmei PLDR flow fixes
* iwlmei smatch fixes
* a logging data improvement
parents 2551a922 f31f7cd9
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_mac_h__
......@@ -398,7 +398,7 @@ struct iwl_he_backoff_conf {
* @IWL_HE_PKT_EXT_64QAM: 64-QAM
* @IWL_HE_PKT_EXT_256QAM: 256-QAM
* @IWL_HE_PKT_EXT_1024QAM: 1024-QAM
* @IWL_HE_PKT_EXT_RESERVED: reserved value
* @IWL_HE_PKT_EXT_4096QAM: 4096-QAM, for EHT only
* @IWL_HE_PKT_EXT_NONE: not defined
*/
enum iwl_he_pkt_ext_constellations {
......@@ -408,7 +408,7 @@ enum iwl_he_pkt_ext_constellations {
IWL_HE_PKT_EXT_64QAM,
IWL_HE_PKT_EXT_256QAM,
IWL_HE_PKT_EXT_1024QAM,
IWL_HE_PKT_EXT_RESERVED,
IWL_HE_PKT_EXT_4096QAM,
IWL_HE_PKT_EXT_NONE,
};
......
......@@ -13,10 +13,12 @@
#define PHY_BAND_6 (2)
/* Supported channel width, vary if there is VHT support */
#define PHY_VHT_CHANNEL_MODE20 (0x0)
#define PHY_VHT_CHANNEL_MODE40 (0x1)
#define PHY_VHT_CHANNEL_MODE80 (0x2)
#define PHY_VHT_CHANNEL_MODE160 (0x3)
#define IWL_PHY_CHANNEL_MODE20 0x0
#define IWL_PHY_CHANNEL_MODE40 0x1
#define IWL_PHY_CHANNEL_MODE80 0x2
#define IWL_PHY_CHANNEL_MODE160 0x3
/* and 320 MHz for EHT */
#define IWL_PHY_CHANNEL_MODE320 0x4
/*
* Control channel position:
......@@ -24,20 +26,17 @@
* For VHT - bit-2 marks if the control is lower/upper relative to center-freq
* bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0.
* center_freq
* For EHT - bit-3 is used for extended distance
* |
* 40Mhz |_______|_______|
* 80Mhz |_______|_______|_______|_______|
* 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
* code 011 010 001 000 | 100 101 110 111
* 40Mhz |____|____|
* 80Mhz |____|____|____|____|
* 160Mhz |____|____|____|____|____|____|____|____|
* 320MHz |____|____|____|____|____|____|____|____|____|____|____|____|____|____|____|____|
* code 1011 1010 1001 1000 0011 0010 0001 0000 0100 0101 0110 0111 1100 1101 1110 1111
*/
#define PHY_VHT_CTRL_POS_1_BELOW (0x0)
#define PHY_VHT_CTRL_POS_2_BELOW (0x1)
#define PHY_VHT_CTRL_POS_3_BELOW (0x2)
#define PHY_VHT_CTRL_POS_4_BELOW (0x3)
#define PHY_VHT_CTRL_POS_1_ABOVE (0x4)
#define PHY_VHT_CTRL_POS_2_ABOVE (0x5)
#define PHY_VHT_CTRL_POS_3_ABOVE (0x6)
#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
#define IWL_PHY_CTRL_POS_ABOVE 0x4
#define IWL_PHY_CTRL_POS_OFFS_EXT 0x8
#define IWL_PHY_CTRL_POS_OFFS_MSK 0x3
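For orientation, here is a minimal decode of the new position code back into a signed offset from the center frequency. This helper does not exist in the driver; it is only a sketch of the field layout documented above (the low two bits plus IWL_PHY_CTRL_POS_OFFS_EXT form the distance index, IWL_PHY_CTRL_POS_ABOVE selects the side):

static int example_ctrl_pos_to_offset_mhz(u8 pos)
{
	/* distance index: bits 1:0, extended by bit 3 for the 320 MHz case */
	u8 idx = (pos & IWL_PHY_CTRL_POS_OFFS_MSK) |
		 ((pos & IWL_PHY_CTRL_POS_OFFS_EXT) >> 1);
	int offs = 10 + 20 * idx;

	/* bit 2 marks a control channel above the center frequency */
	return (pos & IWL_PHY_CTRL_POS_ABOVE) ? offs : -offs;
}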
/*
* struct iwl_fw_channel_info_v1 - channel information
......
......@@ -36,14 +36,14 @@ enum iwl_tlc_mng_cfg_flags {
* @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel
* @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel
* @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel
* @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value
* @IWL_TLC_MNG_CH_WIDTH_320MHZ: 320MHZ channel
*/
enum iwl_tlc_mng_cfg_cw {
IWL_TLC_MNG_CH_WIDTH_20MHZ,
IWL_TLC_MNG_CH_WIDTH_40MHZ,
IWL_TLC_MNG_CH_WIDTH_80MHZ,
IWL_TLC_MNG_CH_WIDTH_160MHZ,
IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ,
IWL_TLC_MNG_CH_WIDTH_320MHZ,
};
/**
......@@ -64,8 +64,7 @@ enum iwl_tlc_mng_cfg_chains {
* @IWL_TLC_MNG_MODE_HT: enable HT
* @IWL_TLC_MNG_MODE_VHT: enable VHT
* @IWL_TLC_MNG_MODE_HE: enable HE
* @IWL_TLC_MNG_MODE_INVALID: invalid value
* @IWL_TLC_MNG_MODE_NUM: a count of possible modes
* @IWL_TLC_MNG_MODE_EHT: enable EHT
*/
enum iwl_tlc_mng_cfg_mode {
IWL_TLC_MNG_MODE_CCK = 0,
......@@ -74,8 +73,7 @@ enum iwl_tlc_mng_cfg_mode {
IWL_TLC_MNG_MODE_HT,
IWL_TLC_MNG_MODE_VHT,
IWL_TLC_MNG_MODE_HE,
IWL_TLC_MNG_MODE_INVALID,
IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID,
IWL_TLC_MNG_MODE_EHT,
};
/**
......
......@@ -218,6 +218,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
type = "HT";
else if (format == RATE_MCS_HE_MSK)
type = "HE";
else if (format == RATE_MCS_EHT_MSK)
type = "EHT";
else
type = "Unknown"; /* shouldn't happen */
......
......@@ -1971,3 +1971,6 @@ MODULE_PARM_DESC(remove_when_gone,
module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool,
S_IRUGO);
MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");
module_param_named(disable_11be, iwlwifi_mod_params.disable_11be, bool, 0444);
MODULE_PARM_DESC(disable_11be, "Disable EHT capabilities (default: false)");
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018 Intel Corporation
* Copyright (C) 2005-2014, 2018, 2020-2022 Intel Corporation
* Copyright (C) 2015 Intel Mobile Communications GmbH
*/
#ifndef __iwl_eeprom_parse_h__
......@@ -31,6 +31,7 @@ struct iwl_nvm_data {
bool sku_cap_amt_enable;
bool sku_cap_ipan_enable;
bool sku_cap_mimo_disabled;
bool sku_cap_11be_enable;
u16 radio_cfg_type;
u8 radio_cfg_step;
......
......@@ -62,6 +62,7 @@ enum iwl_uapsd_disable {
* @disable_11ac: disable VHT capabilities, default = false.
* @remove_when_gone: remove an inaccessible device from the PCIe bus.
* @enable_ini: enable new FW debug infrastructure (INI TLVs)
* @disable_11be: disable EHT capabilities, default = false.
*/
struct iwl_mod_params {
int swcrypto;
......
......@@ -368,6 +368,7 @@ enum {
#define CNVR_AUX_MISC_CHIP 0xA2B800
#define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890
#define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938
#define CNVI_SCU_SEQ_DATA_DW9 0xA27488
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
......
......@@ -1542,5 +1542,6 @@ void iwl_trans_free(struct iwl_trans *trans);
******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);
#endif /* __iwl_trans_h__ */
......@@ -354,6 +354,20 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
MVM_UCODE_ALIVE_TIMEOUT);
if (mvm->trans->trans_cfg->device_family ==
IWL_DEVICE_FAMILY_AX210) {
/* print these registers regardless of alive fail/success */
IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n",
iwl_read_umac_prph(mvm->trans, WFPM_ARC1_PD_NOTIFICATION));
IWL_INFO(mvm, "WFPM_LMAC2_PD_NOTIFICATION: 0x%x\n",
iwl_read_umac_prph(mvm->trans, WFPM_LMAC2_PD_NOTIFICATION));
IWL_INFO(mvm, "WFPM_AUTH_KEY_0: 0x%x\n",
iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG));
IWL_INFO(mvm, "CNVI_SCU_SEQ_DATA_DW9: 0x%x\n",
iwl_read_prph(mvm->trans, CNVI_SCU_SEQ_DATA_DW9));
}
if (ret) {
struct iwl_trans *trans = mvm->trans;
......@@ -390,7 +404,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
UREG_LMAC2_CURRENT_PC));
}
if (ret == -ETIMEDOUT)
if (ret == -ETIMEDOUT && !mvm->pldr_sync)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_ALIVE_TIMEOUT);
......@@ -404,7 +418,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return -EIO;
}
iwl_mei_alive_notif(!ret);
/* if reached this point, Alive notification was received */
iwl_mei_alive_notif(true);
ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait);
if (ret) {
......@@ -1467,18 +1482,22 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
return ret;
sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
if (!(sb_cfg & SB_CFG_RESIDES_IN_OTP_MASK) && iwl_mei_pldr_req())
return ret;
mvm->pldr_sync = !(sb_cfg & SB_CFG_RESIDES_IN_OTP_MASK);
if (mvm->pldr_sync && iwl_mei_pldr_req())
return -EBUSY;
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
if (ret != -ERFKILL)
if (ret != -ERFKILL && !mvm->pldr_sync)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_DRIVER);
goto error;
}
/* FW loaded successfully */
mvm->pldr_sync = false;
iwl_get_shared_mem_conf(&mvm->fwrt);
ret = iwl_mvm_sf_update(mvm, NULL, false);
......
......@@ -1105,6 +1105,8 @@ struct iwl_mvm {
unsigned long last_reset_or_resume_time_jiffies;
bool sta_remove_requires_queue_remove;
bool pldr_sync;
};
/* Extract MVM priv from op_mode and _hw */
......
......@@ -1077,6 +1077,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
static const u8 no_reclaim_cmds[] = {
TX_CMD,
};
u32 max_agg;
size_t scan_size;
u32 min_backoff;
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
......@@ -1098,12 +1099,17 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!hw)
return NULL;
hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
max_agg = IEEE80211_MAX_AMPDU_BUF_EHT;
else
max_agg = IEEE80211_MAX_AMPDU_BUF_HE;
hw->max_rx_aggregation_subframes = max_agg;
if (cfg->max_tx_agg_size)
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
else
hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
hw->max_tx_aggregation_subframes = max_agg;
op_mode = hw->priv;
......@@ -1882,6 +1888,9 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
if (mvm->pldr_sync)
return;
if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
!test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
&mvm->status))
......
......@@ -14,16 +14,18 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
return PHY_VHT_CHANNEL_MODE20;
return IWL_PHY_CHANNEL_MODE20;
case NL80211_CHAN_WIDTH_40:
return PHY_VHT_CHANNEL_MODE40;
return IWL_PHY_CHANNEL_MODE40;
case NL80211_CHAN_WIDTH_80:
return PHY_VHT_CHANNEL_MODE80;
return IWL_PHY_CHANNEL_MODE80;
case NL80211_CHAN_WIDTH_160:
return PHY_VHT_CHANNEL_MODE160;
return IWL_PHY_CHANNEL_MODE160;
case NL80211_CHAN_WIDTH_320:
return IWL_PHY_CHANNEL_MODE320;
default:
WARN(1, "Invalid channel width=%u", chandef->width);
return PHY_VHT_CHANNEL_MODE20;
return IWL_PHY_CHANNEL_MODE20;
}
}
......@@ -33,34 +35,32 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
*/
u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
{
switch (chandef->chan->center_freq - chandef->center_freq1) {
case -70:
return PHY_VHT_CTRL_POS_4_BELOW;
case -50:
return PHY_VHT_CTRL_POS_3_BELOW;
case -30:
return PHY_VHT_CTRL_POS_2_BELOW;
case -10:
return PHY_VHT_CTRL_POS_1_BELOW;
case 10:
return PHY_VHT_CTRL_POS_1_ABOVE;
case 30:
return PHY_VHT_CTRL_POS_2_ABOVE;
case 50:
return PHY_VHT_CTRL_POS_3_ABOVE;
case 70:
return PHY_VHT_CTRL_POS_4_ABOVE;
default:
WARN(1, "Invalid channel definition");
fallthrough;
case 0:
int offs = chandef->chan->center_freq - chandef->center_freq1;
int abs_offs = abs(offs);
u8 ret;
if (offs == 0) {
/*
* The FW is expected to check the control channel position only
* when in HT/VHT and the channel width is not 20MHz. Return
* this value as the default one.
*/
return PHY_VHT_CTRL_POS_1_BELOW;
return 0;
}
/* this results in a value 0-7, i.e. fitting into 0b0111 */
ret = (abs_offs - 10) / 20;
/*
* But we need the value to be in 0b1011 because 0b0100 is
* IWL_PHY_CTRL_POS_ABOVE, so shift bit 2 up to land in
* IWL_PHY_CTRL_POS_OFFS_EXT (0b1000)
*/
ret = (ret & IWL_PHY_CTRL_POS_OFFS_MSK) |
((ret & BIT(2)) << 1);
/* and add the above bit */
ret |= (offs > 0) * IWL_PHY_CTRL_POS_ABOVE;
return ret;
}
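A few worked values for the encoder above, derived from the arithmetic and matching the code table in the header comment (shown for illustration only):

/*
 * offset  -10 MHz: abs_offs = 10,  idx = 0 -> 0b0000
 * offset  +70 MHz: abs_offs = 70,  idx = 3 -> 0b0011 | ABOVE = 0b0111
 * offset  -90 MHz: abs_offs = 90,  idx = 4 -> bit 2 shifts to OFFS_EXT = 0b1000
 * offset +150 MHz: abs_offs = 150, idx = 7 -> 0b1011 | ABOVE = 0b1111
 */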
/*
......
......@@ -9,9 +9,11 @@
#include "iwl-op-mode.h"
#include "mvm.h"
static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
static u8 rs_fw_bw_from_sta_bw(const struct ieee80211_sta *sta)
{
switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_320:
return IWL_TLC_MNG_CH_WIDTH_320MHZ;
case IEEE80211_STA_RX_BW_160:
return IWL_TLC_MNG_CH_WIDTH_160MHZ;
case IEEE80211_STA_RX_BW_80:
......@@ -238,6 +240,122 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
}
}
static u8 rs_fw_eht_max_nss(u8 rx_nss, u8 tx_nss)
{
u8 tx = u8_get_bits(tx_nss, IEEE80211_EHT_MCS_NSS_TX);
u8 rx = u8_get_bits(rx_nss, IEEE80211_EHT_MCS_NSS_RX);
/* the max nss that can be used is the min of our tx capa
* and the peer rx capa.
*/
return min(tx, rx);
}
#define MAX_NSS_MCS(mcs_num, rx, tx) \
rs_fw_eht_max_nss((rx)->rx_tx_mcs ##mcs_num## _max_nss, \
(tx)->rx_tx_mcs ##mcs_num## _max_nss)
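The macro pastes the MCS-range number into the field name; for example, MAX_NSS_MCS(11, mcs_rx, mcs_tx) expands as shown below (illustrative expansion: the min of the peer RX NSS and our TX NSS for MCS 10-11):

rs_fw_eht_max_nss((mcs_rx)->rx_tx_mcs11_max_nss,
		  (mcs_tx)->rx_tx_mcs11_max_nss)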
static void rs_fw_set_eht_mcs_nss(__le16 ht_rates[][3],
enum IWL_TLC_MCS_PER_BW bw,
u8 max_nss, u16 mcs_msk)
{
if (max_nss >= 2)
ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk);
if (max_nss >= 1)
ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk);
}
static const
struct ieee80211_eht_mcs_nss_supp_bw *
rs_fw_rs_mcs2eht_mcs(enum IWL_TLC_MCS_PER_BW bw,
const struct ieee80211_eht_mcs_nss_supp *eht_mcs)
{
switch (bw) {
case IWL_TLC_MCS_PER_BW_80:
return &eht_mcs->bw._80;
case IWL_TLC_MCS_PER_BW_160:
return &eht_mcs->bw._160;
case IWL_TLC_MCS_PER_BW_320:
return &eht_mcs->bw._320;
default:
return NULL;
}
}
static void rs_fw_eht_set_enabled_rates(const struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd_v4 *cmd)
{
/* peer RX mcs capa */
const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs =
&sta->deflink.eht_cap.eht_mcs_nss_supp;
/* our TX mcs capa */
const struct ieee80211_eht_mcs_nss_supp *eht_tx_mcs =
&sband->iftype_data->eht_cap.eht_mcs_nss_supp;
enum IWL_TLC_MCS_PER_BW bw;
struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_rx_20;
struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_tx_20;
/* peer is 20Mhz only */
if (!(sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
mcs_rx_20 = eht_rx_mcs->only_20mhz;
} else {
mcs_rx_20.rx_tx_mcs7_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
mcs_rx_20.rx_tx_mcs9_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
mcs_rx_20.rx_tx_mcs11_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs11_max_nss;
mcs_rx_20.rx_tx_mcs13_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs13_max_nss;
}
/* nic is 20Mhz only */
if (!(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
mcs_tx_20 = eht_tx_mcs->only_20mhz;
} else {
mcs_tx_20.rx_tx_mcs7_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
mcs_tx_20.rx_tx_mcs9_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
mcs_tx_20.rx_tx_mcs11_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs11_max_nss;
mcs_tx_20.rx_tx_mcs13_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs13_max_nss;
}
/* rates for 20/40/80 bw */
bw = IWL_TLC_MCS_PER_BW_80;
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(7, &mcs_rx_20, &mcs_tx_20), GENMASK(7, 0));
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(9, &mcs_rx_20, &mcs_tx_20), GENMASK(9, 8));
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(11, &mcs_rx_20, &mcs_tx_20), GENMASK(11, 10));
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(13, &mcs_rx_20, &mcs_tx_20), GENMASK(13, 12));
/* rate for 160/320 bw */
for (bw = IWL_TLC_MCS_PER_BW_160; bw <= IWL_TLC_MCS_PER_BW_320; bw++) {
const struct ieee80211_eht_mcs_nss_supp_bw *mcs_rx =
rs_fw_rs_mcs2eht_mcs(bw, eht_rx_mcs);
const struct ieee80211_eht_mcs_nss_supp_bw *mcs_tx =
rs_fw_rs_mcs2eht_mcs(bw, eht_tx_mcs);
/* got unsupported index for bw */
if (!mcs_rx || !mcs_tx)
continue;
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(9, mcs_rx, mcs_tx), GENMASK(9, 0));
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(11, mcs_rx, mcs_tx), GENMASK(11, 10));
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
MAX_NSS_MCS(13, mcs_rx, mcs_tx), GENMASK(13, 12));
}
/* the station supports only a single receive chain */
if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC ||
sta->deflink.rx_nss < 2)
memset(cmd->ht_rates[IWL_TLC_NSS_2], 0,
sizeof(cmd->ht_rates[IWL_TLC_NSS_2]));
}
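To make the mask accumulation concrete, assume a hypothetical capability combination where the resolved limit is two spatial streams for MCS 0-9 but only one for MCS 10-13. Because rs_fw_set_eht_mcs_nss() ORs each MCS range into every NSS level up to the resolved maximum, the 80 MHz entries then end up equivalent to:

	cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(GENMASK(13, 0));
	cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(GENMASK(9, 0));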
static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd_v4 *cmd)
......@@ -258,7 +376,10 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
/* HT/VHT rates */
if (he_cap->has_he) {
if (sta->deflink.eht_cap.has_eht) {
cmd->mode = IWL_TLC_MNG_MODE_EHT;
rs_fw_eht_set_enabled_rates(sta, sband, cmd);
} else if (he_cap->has_he) {
cmd->mode = IWL_TLC_MNG_MODE_HE;
rs_fw_he_set_enabled_rates(sta, sband, cmd);
} else if (vht_cap->vht_supported) {
......
......@@ -1215,6 +1215,7 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct sk_buff_head mpdus_skbs;
unsigned int payload_len;
int ret;
struct sk_buff *orig_skb = skb;
if (WARN_ON_ONCE(!mvmsta))
return -1;
......@@ -1247,8 +1248,17 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
if (ret) {
/* Free skbs created as part of TSO logic that have not yet been dequeued */
__skb_queue_purge(&mpdus_skbs);
return ret;
/* skb here is not necessarily the same skb that entered this function,
* so free it explicitly.
*/
if (skb == orig_skb)
ieee80211_free_txskb(mvm->hw, skb);
else
kfree_skb(skb);
/* there was an error, but we consumed the skb one way or another, so return 0 */
return 0;
}
}
......
......@@ -2052,6 +2052,7 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
struct iwl_trans_pcie_removal {
struct pci_dev *pdev;
struct work_struct work;
bool rescan;
};
static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
......@@ -2060,18 +2061,61 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
container_of(wk, struct iwl_trans_pcie_removal, work);
struct pci_dev *pdev = removal->pdev;
static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
struct pci_bus *bus = pdev->bus;
dev_err(&pdev->dev, "Device gone - attempting removal\n");
kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
pci_lock_rescan_remove();
pci_dev_put(pdev);
pci_stop_and_remove_bus_device(pdev);
if (removal->rescan)
pci_rescan_bus(bus->parent);
pci_unlock_rescan_remove();
kfree(removal);
module_put(THIS_MODULE);
}
void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)
{
struct iwl_trans_pcie_removal *removal;
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return;
IWL_ERR(trans, "Device gone - scheduling removal!\n");
/*
* get a module reference to avoid doing this
* while unloading anyway and to avoid
* scheduling a work with code that's being
* removed.
*/
if (!try_module_get(THIS_MODULE)) {
IWL_ERR(trans,
"Module is being unloaded - abort\n");
return;
}
removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
if (!removal) {
module_put(THIS_MODULE);
return;
}
/*
* we don't need to clear this flag, because
* the trans will be freed and reallocated.
*/
set_bit(STATUS_TRANS_DEAD, &trans->status);
removal->pdev = to_pci_dev(trans->dev);
removal->rescan = rescan;
INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
pci_dev_get(removal->pdev);
schedule_work(&removal->work);
}
EXPORT_SYMBOL(iwl_trans_pcie_remove);
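With the teardown factored out and exported, other parts of the driver can now trigger the same removal path. A hypothetical caller (not part of this patch set) would look like:

/* Schedule removal of a dead device and ask the PCI core to rescan the
 * parent bus afterwards, so the device can be re-probed if it recovers.
 */
static void example_handle_dead_device(struct iwl_trans *trans)
{
	iwl_trans_pcie_remove(trans, true);
}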
/*
* This version doesn't disable BHs but rather assumes they're
* already disabled.
......@@ -2131,47 +2175,12 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
iwl_trans_pcie_dump_regs(trans);
if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
struct iwl_trans_pcie_removal *removal;
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
goto err;
IWL_ERR(trans, "Device gone - scheduling removal!\n");
/*
* get a module reference to avoid doing this
* while unloading anyway and to avoid
* scheduling a work with code that's being
* removed.
*/
if (!try_module_get(THIS_MODULE)) {
IWL_ERR(trans,
"Module is being unloaded - abort\n");
goto err;
}
removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
if (!removal) {
module_put(THIS_MODULE);
goto err;
}
/*
* we don't need to clear this flag, because
* the trans will be freed and reallocated.
*/
set_bit(STATUS_TRANS_DEAD, &trans->status);
removal->pdev = to_pci_dev(trans->dev);
INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
pci_dev_get(removal->pdev);
schedule_work(&removal->work);
} else {
if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U)
iwl_trans_pcie_remove(trans, false);
else
iwl_write32(trans, CSR_RESET,
CSR_RESET_REG_FLAG_FORCE_NMI);
}
err:
spin_unlock(&trans_pcie->reg_lock);
return false;
}
......