Commit bc11517b authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2021-12-21-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

iwlwifi patches for v5.17 v2

* Support for Time-Aware-SAR (TAS) as read from the BIOS;
* Fix scan timeout issue when 6GHz is enabled;
* Work continues for new HW family Bz;
* Support for Optimized Connectivity Experience (OCE) scan;
* A bunch of FW debugging improvements and fixes;
* Fix one 32-bit compilation issue;
* Some RX changes for the new HW family;
* Some fixes for 6 GHz scan;
* Fix SAR table issues on newer platforms;
* Fix early restart crash;
* Small fix in the debugging code;
* Add new Killer device IDs;
* Datapath updates for the Bz family continue;
* A couple of important fixes in iwlmei;
* Some other small fixes, clean-ups and improvements.
parents 8b144ded bcbddc4f
......@@ -9,7 +9,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 68
#define IWL_22000_UCODE_API_MAX 69
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
......@@ -56,6 +56,11 @@
#define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-"
#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm-a0-"
#define IWL_BZ_Z_GF_A_FW_PRE "iwlwifi-bz-z0-gf-a0-"
#define IWL_BNJ_A_FM_A_FW_PRE "iwlwifi-BzBnj-a0-fm-a0-"
#define IWL_BNJ_A_FM4_A_FW_PRE "iwlwifi-BzBnj-a0-fm4-a0-"
#define IWL_BNJ_A_GF_A_FW_PRE "iwlwifi-BzBnj-a0-gf-a0-"
#define IWL_BNJ_A_GF4_A_FW_PRE "iwlwifi-BzBnj-a0-gf4-a0-"
#define IWL_BNJ_A_HR_B_FW_PRE "iwlwifi-BzBnj-a0-hr-b0-"
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
......@@ -116,6 +121,16 @@
IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BZ_Z_GF_A_MODULE_FIRMWARE(api) \
IWL_BZ_Z_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_FM4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_GF_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BNJ_A_HR_B_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
......@@ -237,7 +252,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.dccm2_len = IWL_22000_DCCM2_LEN, \
.smem_offset = IWL_22000_SMEM_OFFSET, \
.smem_len = IWL_22000_SMEM_LEN, \
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, \
.apmg_not_supported = true, \
.trans.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
......@@ -882,6 +897,40 @@ const struct iwl_cfg iwl_cfg_bz_z0_gf_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0 = {
.fw_name_pre = IWL_BNJ_A_FM_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0 = {
.fw_name_pre = IWL_BNJ_A_FM4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0 = {
.fw_name_pre = IWL_BNJ_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0 = {
.fw_name_pre = IWL_BNJ_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0 = {
.fw_name_pre = IWL_BNJ_A_HR_B_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
......@@ -910,3 +959,8 @@ MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
......@@ -242,17 +242,16 @@ union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev,
IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
__le32 *block_list_array,
int *block_list_size)
struct iwl_tas_config_cmd_v3 *cmd)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i;
bool enabled;
int ret, tbl_rev, i, block_list_size, enabled;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
/* try to read wtas table revision 1 or revision 0 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WTAS_WIFI_DATA_SIZE,
&tbl_rev);
......@@ -261,40 +260,54 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
goto out_free;
}
if (wifi_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
tbl_rev != 0) {
if (tbl_rev == 1 && wifi_pkg->package.elements[1].type ==
ACPI_TYPE_INTEGER) {
u32 tas_selection =
(u32)wifi_pkg->package.elements[1].integer.value;
u16 override_iec =
(tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
ACPI_WTAS_ENABLE_IEC_POS;
enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
cmd->override_tas_iec = cpu_to_le16(override_iec);
cmd->enable_tas_iec = cpu_to_le16(enabled_iec);
} else if (tbl_rev == 0 &&
wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
enabled = !!wifi_pkg->package.elements[1].integer.value;
} else {
ret = -EINVAL;
goto out_free;
}
enabled = !!wifi_pkg->package.elements[1].integer.value;
if (!enabled) {
*block_list_size = -1;
IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
ret = 0;
goto out_free;
}
IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev);
if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[2].integer.value >
APCI_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
wifi_pkg->package.elements[1].integer.value);
wifi_pkg->package.elements[2].integer.value);
ret = -EINVAL;
goto out_free;
}
*block_list_size = wifi_pkg->package.elements[2].integer.value;
block_list_size = wifi_pkg->package.elements[2].integer.value;
cmd->block_list_size = cpu_to_le32(block_list_size);
IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *block_list_size);
if (*block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
*block_list_size);
block_list_size);
ret = -EINVAL;
goto out_free;
}
for (i = 0; i < *block_list_size; i++) {
for (i = 0; i < block_list_size; i++) {
u32 country;
if (wifi_pkg->package.elements[3 + i].type !=
......@@ -306,11 +319,11 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
block_list_array[i] = cpu_to_le32(country);
cmd->block_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
ret = 0;
ret = 1;
out_free:
kfree(data);
return ret;
......
......@@ -65,10 +65,19 @@
#define ACPI_ECKV_WIFI_DATA_SIZE 2
/*
* 1 type, 1 enabled, 1 block list size, 16 block list array
* TAS size: 1 element for type,
* 1 element for enabled field,
* 1 element for block list size,
* 16 elements for block list array
*/
#define APCI_WTAS_BLACK_LIST_MAX 16
#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
#define ACPI_WTAS_ENABLED_MSK 0x1
#define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2
#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
#define ACPI_WTAS_ENABLE_IEC_POS 0x2
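For reference only (not part of the patch): a minimal sketch of how the revision-1 WTAS selection word is split using the masks and shift positions above, mirroring the decode done in iwl_acpi_get_tas() earlier in this diff. The helper name and out-parameters are illustrative.

#include <linux/types.h>

/*
 * Illustrative only: relies on the ACPI_WTAS_* masks/positions defined
 * above; mirrors the revision-1 decode in iwl_acpi_get_tas().
 */
static void wtas_decode_selection_example(u32 tas_selection,
					  bool *tas_enabled,
					  u16 *override_iec,
					  u16 *enable_iec)
{
	/* bit 0: TAS enabled on this platform */
	*tas_enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
	/* bit 1: BIOS overrides the default IEC regulatory setting */
	*override_iec = (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >>
			ACPI_WTAS_OVERRIDE_IEC_POS;
	/* bit 2: IEC regulatory enabled (meaningful when overridden) */
	*enable_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
		      ACPI_WTAS_ENABLE_IEC_POS;
}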
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V1) + 2)
......@@ -105,6 +114,11 @@ struct iwl_geo_profile {
struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2];
};
/* Same thing as with SAR, all revisions fit in revision 2 */
struct iwl_ppag_chain {
s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
};
enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_QUERY = 0,
DSM_FUNC_DISABLE_SRD = 1,
......@@ -198,8 +212,8 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
struct iwl_per_chain_offset *table,
u32 n_bands, u32 n_profiles);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *block_list_array,
int *block_list_size);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
struct iwl_tas_config_cmd_v3 *cmd);
__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
......@@ -280,8 +294,7 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
}
static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
__le32 *block_list_array,
int *block_list_size)
struct iwl_tas_config_cmd_v3 *cmd)
{
return -ENOENT;
}
......
......@@ -157,15 +157,6 @@ enum iwl_card_state_flags {
CARD_IS_RX_ON = 0x10,
};
/**
* struct iwl_radio_version_notif - information on the card state
* ( CARD_STATE_NOTIFICATION = 0xa1 )
* @flags: &enum iwl_card_state_flags
*/
struct iwl_card_state_notif {
__le32 flags;
} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
/**
* enum iwl_error_recovery_flags - flags for error recovery cmd
* @ERROR_RECOVERY_UPDATE_DB: update db from blob sent
......
......@@ -91,7 +91,8 @@ enum iwl_legacy_cmds {
/**
* @SCAN_CFG_CMD:
* uses &struct iwl_scan_config_v1 or &struct iwl_scan_config
* uses &struct iwl_scan_config_v1, &struct iwl_scan_config_v2
* or &struct iwl_scan_config
*/
SCAN_CFG_CMD = 0xc,
......@@ -384,13 +385,6 @@ enum iwl_legacy_cmds {
*/
REDUCE_TX_POWER_CMD = 0x9f,
/**
* @CARD_STATE_NOTIFICATION:
* Card state (RF/CT kill) notification,
* uses &struct iwl_card_state_notif
*/
CARD_STATE_NOTIFICATION = 0xa1,
/**
* @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif
*/
......
......@@ -124,7 +124,7 @@ struct iwl_fw_ini_region_internal_buffer {
* @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID
* @type: region type. One of &enum iwl_fw_ini_region_type
* @sub_type: region sub type
* @sub_type_ver: region sub type
* @sub_type_ver: region sub type version
* @reserved: not in use
* @name: region name
* @dev_addr: device address configuration. Used by
......@@ -386,7 +386,16 @@ enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_NUM
}; /* FW_TLV_DEBUG_REGION_TYPE_API_E */
#define IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM 1
enum iwl_fw_ini_region_device_memory_subtype {
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM = 1,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE = 5,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE = 7,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE = 10,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE = 14,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE = 16,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE = 18,
IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE = 20,
}; /* FW_TLV_DEBUG_REGION_DEVICE_MEMORY_SUBTYPE_API_E */
/**
* enum iwl_fw_ini_time_point
......@@ -474,4 +483,17 @@ enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
};
/**
* enum iwl_fw_ini_trigger_reset_fw_policy - Determines how to handle reset
*
* @IWL_FW_INI_RESET_FW_MODE_NOTHING: do not stop or reload the FW (default)
* @IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY: stop the FW without reloading it
* @IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW: stop the FW and reload it
*/
enum iwl_fw_ini_trigger_reset_fw_policy {
IWL_FW_INI_RESET_FW_MODE_NOTHING = 0,
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY,
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW
};
#endif
......@@ -393,17 +393,32 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
#define IWL_TAS_BLACK_LIST_MAX 16
#define IWL_TAS_BLOCK_LIST_MAX 16
/**
* struct iwl_tas_config_cmd - configures the TAS
* struct iwl_tas_config_cmd_v2 - configures the TAS
* @block_list_size: size of relevant field in block_list_array
* @block_list_array: block list countries (without TAS)
* @block_list_array: list of countries where TAS must be disabled
*/
struct iwl_tas_config_cmd {
struct iwl_tas_config_cmd_v2 {
__le32 block_list_size;
__le32 block_list_array[IWL_TAS_BLACK_LIST_MAX];
__le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
/**
* struct iwl_tas_config_cmd_v3 - configures the TAS
* @block_list_size: size of relevant field in block_list_array
* @block_list_array: list of countries where TAS must be disabled
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
*/
struct iwl_tas_config_cmd_v3 {
__le32 block_list_size;
__le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
__le16 override_tas_iec;
__le16 enable_tas_iec;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
* enum iwl_lari_configs - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
......
......@@ -419,7 +419,7 @@ struct iwl_geo_tx_power_profiles_cmd_v1 {
* struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: BIOS table revision.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
*/
struct iwl_geo_tx_power_profiles_cmd_v2 {
__le32 ops;
......@@ -431,7 +431,7 @@ struct iwl_geo_tx_power_profiles_cmd_v2 {
* struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: BIOS table revision.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
*/
struct iwl_geo_tx_power_profiles_cmd_v3 {
__le32 ops;
......@@ -443,7 +443,7 @@ struct iwl_geo_tx_power_profiles_cmd_v3 {
* struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: BIOS table revision.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
*/
struct iwl_geo_tx_power_profiles_cmd_v4 {
__le32 ops;
......@@ -455,7 +455,7 @@ struct iwl_geo_tx_power_profiles_cmd_v4 {
* struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: BIOS table revision.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
*/
struct iwl_geo_tx_power_profiles_cmd_v5 {
__le32 ops;
......
......@@ -116,9 +116,20 @@ enum IWL_TLC_MNG_NSS {
IWL_TLC_NSS_MAX
};
enum IWL_TLC_HT_BW_RATES {
IWL_TLC_HT_BW_NONE_160,
IWL_TLC_HT_BW_160,
/**
* enum IWL_TLC_MCS_PER_BW - mcs index per BW
* @IWL_TLC_MCS_PER_BW_80: mcs for bw - 20 MHz, 40 MHz, 80 MHz
* @IWL_TLC_MCS_PER_BW_160: mcs for bw - 160 MHz
* @IWL_TLC_MCS_PER_BW_320: mcs for bw - 320 MHz
* @IWL_TLC_MCS_PER_BW_NUM_V3: number of entries up to version 3
* @IWL_TLC_MCS_PER_BW_NUM_V4: number of entries from version 4
*/
enum IWL_TLC_MCS_PER_BW {
IWL_TLC_MCS_PER_BW_80,
IWL_TLC_MCS_PER_BW_160,
IWL_TLC_MCS_PER_BW_320,
IWL_TLC_MCS_PER_BW_NUM_V3 = IWL_TLC_MCS_PER_BW_160 + 1,
IWL_TLC_MCS_PER_BW_NUM_V4 = IWL_TLC_MCS_PER_BW_320 + 1,
};
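For orientation only: the two TLC config command layouts defined below differ in the second dimension of ht_rates[] (per &enum IWL_TLC_MCS_PER_BW above), so callers pick the layout, and therefore the command size, by the firmware's advertised command version. A minimal sketch of that selection; the helper name and the version cut-off are assumptions, not the driver's exact logic:

/*
 * Illustrative only: assumes the iwlwifi fw headers (struct iwl_fw,
 * iwl_fw_lookup_cmd_ver(), DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD) and the
 * iwl_tlc_config_cmd_v3/_v4 structs defined below.
 */
static u16 example_tlc_cfg_cmd_size(const struct iwl_fw *fw)
{
	u8 ver = iwl_fw_lookup_cmd_ver(fw, DATA_PATH_GROUP,
				       TLC_MNG_CONFIG_CMD, 0);

	/*
	 * v4 widens ht_rates[] from IWL_TLC_MCS_PER_BW_NUM_V3 to
	 * IWL_TLC_MCS_PER_BW_NUM_V4 columns per NSS (adding 320 MHz).
	 */
	return ver >= 4 ? sizeof(struct iwl_tlc_config_cmd_v4) :
			  sizeof(struct iwl_tlc_config_cmd_v3);
}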
/**
......@@ -131,8 +142,8 @@ enum IWL_TLC_HT_BW_RATES {
* @amsdu: TX amsdu is supported
* @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
* @non_ht_rates: bitmap of supported legacy rates
* @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
* pair (0 - 80mhz width and below, 1 - 160mhz).
* @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per &enum IWL_TLC_MCS_PER_BW
* <nss, channel-width> pair (0 - 80mhz width and below, 1 - 160mhz).
* @max_mpdu_len: max MPDU length, in bytes
* @sgi_ch_width_supp: bitmap of SGI support per channel width
* use BIT(@enum iwl_tlc_mng_cfg_cw)
......@@ -140,7 +151,7 @@ enum IWL_TLC_HT_BW_RATES {
* @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
* set zero for no limit.
*/
struct iwl_tlc_config_cmd {
struct iwl_tlc_config_cmd_v3 {
u8 sta_id;
u8 reserved1[3];
u8 max_ch_width;
......@@ -149,13 +160,44 @@ struct iwl_tlc_config_cmd {
u8 amsdu;
__le16 flags;
__le16 non_ht_rates;
__le16 ht_rates[IWL_TLC_NSS_MAX][2];
__le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V3];
__le16 max_mpdu_len;
u8 sgi_ch_width_supp;
u8 reserved2;
__le32 max_tx_op;
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
/**
* struct iwl_tlc_config_cmd_v4 - TLC configuration
* @sta_id: station id
* @reserved1: reserved
* @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
* @mode: &enum iwl_tlc_mng_cfg_mode
* @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
* @sgi_ch_width_supp: bitmap of SGI support per channel width
* use BIT(&enum iwl_tlc_mng_cfg_cw)
* @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
* @non_ht_rates: bitmap of supported legacy rates
* @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
* pair (0 - 80mhz width and below, 1 - 160mhz, 2 - 320mhz).
* @max_mpdu_len: max MPDU length, in bytes
* @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
* set zero for no limit.
*/
struct iwl_tlc_config_cmd_v4 {
u8 sta_id;
u8 reserved1[3];
u8 max_ch_width;
u8 mode;
u8 chains;
u8 sgi_ch_width_supp;
__le16 flags;
__le16 non_ht_rates;
__le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V4];
__le16 max_mpdu_len;
__le16 max_tx_op;
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */
/**
* enum iwl_tlc_update_flags - updated fields
* @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update
......
......@@ -82,6 +82,16 @@ enum iwl_scan_offload_band_selection {
IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc,
};
enum iwl_scan_offload_auth_alg {
IWL_AUTH_ALGO_UNSUPPORTED = 0x00,
IWL_AUTH_ALGO_NONE = 0x01,
IWL_AUTH_ALGO_PSK = 0x02,
IWL_AUTH_ALGO_8021X = 0x04,
IWL_AUTH_ALGO_SAE = 0x08,
IWL_AUTH_ALGO_8021X_SHA384 = 0x10,
IWL_AUTH_ALGO_OWE = 0x20,
};
/**
* struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
* @ssid_index: index to ssid list in fixed part
......@@ -201,7 +211,7 @@ struct iwl_scan_channel_cfg_lmac {
__le32 iter_interval;
} __packed;
/*
/**
* struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
* @offset: offset in the data block
* @len: length of the segment
......@@ -211,7 +221,8 @@ struct iwl_scan_probe_segment {
__le16 len;
} __packed;
/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
/**
* struct iwl_scan_probe_req_v1 - PROBE_REQUEST_FRAME_API_S_VER_2
* @mac_header: first (and common) part of the probe
* @band_data: band specific data
* @common_data: last (and common) part of the probe
......@@ -224,7 +235,8 @@ struct iwl_scan_probe_req_v1 {
u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE];
} __packed;
/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2
/**
* struct iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2
* @mac_header: first (and common) part of the probe
* @band_data: band specific data
* @common_data: last (and common) part of the probe
......@@ -247,7 +259,8 @@ enum iwl_scan_channel_flags {
IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER = BIT(6),
};
/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
/**
* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
* @flags: enum iwl_scan_channel_flags
* @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
* involved.
......@@ -492,7 +505,7 @@ struct iwl_scan_dwell {
} __packed;
/**
* struct iwl_scan_config_v1
* struct iwl_scan_config_v1 - scan configuration command
* @flags: enum scan_config_flags
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
......@@ -524,6 +537,21 @@ struct iwl_scan_config_v1 {
#define SCAN_LB_LMAC_IDX 0
#define SCAN_HB_LMAC_IDX 1
/**
* struct iwl_scan_config_v2 - scan configuration command
* @flags: enum scan_config_flags
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
* @legacy_rates: default legacy rates - enum scan_config_rates
* @out_of_channel_time: default max out of serving channel time
* @suspend_time: default max suspend time
* @dwell: dwells for the scan
* @mac_addr: default mac address to be used in probes
* @bcast_sta_id: the index of the station in the fw
* @channel_flags: default channel flags - enum iwl_channel_flags
* scan_config_channel_flag
* @channel_array: default supported channels
*/
struct iwl_scan_config_v2 {
__le32 flags;
__le32 tx_chains;
......@@ -539,7 +567,7 @@ struct iwl_scan_config_v2 {
} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
/**
* struct iwl_scan_config
* struct iwl_scan_config - scan configuration command
* @enable_cam_mode: whether to enable CAM mode.
* @enable_promiscouos_mode: whether to enable promiscuous mode
* @bcast_sta_id: the index of the station in the fw. Deprecated starting with
......@@ -640,6 +668,10 @@ enum iwl_umac_scan_general_flags2 {
* @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN: in case
* &IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN is enabled and scan is
* activated over 6GHz PSC channels, filter in beacons and probe responses.
* @IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE: if set, send probe requests in a minimum
rate of 5.5 Mbps, filter in broadcast probe responses and set the max
* channel time indication field in the FILS request parameters element
* (if included by the driver in the probe request IEs).
*/
enum iwl_umac_scan_general_flags_v2 {
IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0),
......@@ -657,6 +689,7 @@ enum iwl_umac_scan_general_flags_v2 {
IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = BIT(12),
IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN = BIT(13),
IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN = BIT(14),
IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE = BIT(15),
};
/**
......
......@@ -177,6 +177,17 @@ enum iwl_tx_offload_assist_flags_pos {
#define IWL_TX_CMD_OFFLD_MH_MASK 0x1f
#define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f
enum iwl_tx_offload_assist_bz {
IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS = 0x000003ff,
IWL_TX_CMD_OFFLD_BZ_START_OFFS = 0x001ff800,
IWL_TX_CMD_OFFLD_BZ_MH_LEN = 0x07c00000,
IWL_TX_CMD_OFFLD_BZ_MH_PAD = 0x08000000,
IWL_TX_CMD_OFFLD_BZ_AMSDU = 0x10000000,
IWL_TX_CMD_OFFLD_BZ_ZERO2ONES = 0x20000000,
IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM = 0x40000000,
IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM = 0x80000000,
};
/* TODO: complete documentation for try_cnt and btkill_cnt */
/**
* struct iwl_tx_cmd - TX command struct to FW
......
......@@ -1564,7 +1564,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG,
DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK);
for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
prph_data = iwl_read_prph(fwrt->trans, (i % 2) ?
prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
DBGI_SRAM_TARGET_ACCESS_RDATA_LSB);
if (prph_data == 0x5a5a5a5a) {
......@@ -2110,7 +2110,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
*/
hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev);
if (hw_type == IWL_AX210_HW_TYPE) {
u32 prph_val = iwl_read_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR_GEN2);
u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR);
u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT);
u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT);
u32 masked_bits = is_jacket | (is_cdb << 1);
......@@ -2719,6 +2719,9 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
out:
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
iwl_fw_error_dump_data_free(dump_data);
......
......@@ -12,6 +12,7 @@
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "pnvm.h"
/*
* Note: This structure is read from the device with IO accesses,
......@@ -19,53 +20,6 @@
* read with u32-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
struct iwl_error_event_table_v1 {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
u32 pc; /* program counter */
u32 blink1; /* branch link */
u32 blink2; /* branch link */
u32 ilink1; /* interrupt link */
u32 ilink2; /* interrupt link */
u32 data1; /* error-specific data */
u32 data2; /* error-specific data */
u32 data3; /* error-specific data */
u32 bcon_time; /* beacon timer */
u32 tsf_low; /* network timestamp function timer */
u32 tsf_hi; /* network timestamp function timer */
u32 gp1; /* GP1 timer register */
u32 gp2; /* GP2 timer register */
u32 gp3; /* GP3 timer register */
u32 ucode_ver; /* uCode version */
u32 hw_ver; /* HW Silicon version */
u32 brd_ver; /* HW board version */
u32 log_pc; /* log program counter */
u32 frame_ptr; /* frame pointer */
u32 stack_ptr; /* stack pointer */
u32 hcmd; /* last host command header */
u32 isr0; /* isr status register LMPM_NIC_ISR0:
* rxtx_flag */
u32 isr1; /* isr status register LMPM_NIC_ISR1:
* host_flag */
u32 isr2; /* isr status register LMPM_NIC_ISR2:
* enc_flag */
u32 isr3; /* isr status register LMPM_NIC_ISR3:
* time_flag */
u32 isr4; /* isr status register LMPM_NIC_ISR4:
* wico interrupt */
u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
u32 wait_event; /* wait event() caller address */
u32 l2p_control; /* L2pControlField */
u32 l2p_duration; /* L2pDurationField */
u32 l2p_mhvalid; /* L2pMhValidBits */
u32 l2p_addr_match; /* L2pAddrMatchStat */
u32 lmpm_pmg_sel; /* indicate which clocks are turned on
* (LMPM_PMG_SEL) */
u32 u_timestamp; /* indicate when the date and time of the
* compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
struct iwl_error_event_table {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
......@@ -147,6 +101,7 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
struct iwl_trans *trans = fwrt->trans;
struct iwl_umac_error_event_table table = {};
u32 base = fwrt->trans->dbg.umac_error_event_table;
char pnvm_name[MAX_PNVM_NAME];
if (!base &&
!(fwrt->trans->dbg.error_event_table_tlv_status &
......@@ -164,6 +119,13 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
fwrt->trans->status, table.valid);
}
if ((table.error_id & ~FW_SYSASSERT_CPU_MASK) ==
FW_SYSASSERT_PNVM_MISSING) {
iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));
IWL_ERR(fwrt, "PNVM data is missing, please install %s\n",
pnvm_name);
}
IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,
iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);
......@@ -297,21 +259,21 @@ struct iwl_tcm_error_event_table {
u32 reserved[4];
} __packed; /* TCM_LOG_ERROR_TABLE_API_S_VER_1 */
static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_tcm_error_event_table table = {};
u32 base = fwrt->trans->dbg.tcm_error_event_table;
u32 base = fwrt->trans->dbg.tcm_error_event_table[idx];
int i;
u32 flag = idx ? IWL_ERROR_EVENT_TABLE_TCM2 :
IWL_ERROR_EVENT_TABLE_TCM1;
if (!base ||
!(fwrt->trans->dbg.error_event_table_tlv_status &
IWL_ERROR_EVENT_TABLE_TCM))
if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag))
return;
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
IWL_ERR(fwrt, "TCM status:\n");
IWL_ERR(fwrt, "TCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1);
......@@ -330,13 +292,72 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n",
table.sw_status[i], i);
}
/*
* RCM error struct.
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
* read with u32-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
struct iwl_rcm_error_event_table {
u32 valid;
u32 error_id;
u32 blink2;
u32 ilink1;
u32 ilink2;
u32 data1, data2, data3;
u32 logpc;
u32 frame_pointer;
u32 stack_pointer;
u32 msgid;
u32 isr;
u32 frame_hw_status;
u32 mbx_lmac_to_rcm_req;
u32 mbx_rcm_to_lmac_req;
u32 mh_ctl;
u32 mh_addr1_lo;
u32 mh_info;
u32 mh_err;
u32 reserved[3];
} __packed; /* RCM_LOG_ERROR_TABLE_API_S_VER_1 */
static void iwl_fwrt_dump_rcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
{
struct iwl_trans *trans = fwrt->trans;
struct iwl_rcm_error_event_table table = {};
u32 base = fwrt->trans->dbg.rcm_error_event_table[idx];
u32 flag = idx ? IWL_ERROR_EVENT_TABLE_RCM2 :
IWL_ERROR_EVENT_TABLE_RCM1;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
u32 scratch = iwl_read32(trans, CSR_FUNC_SCRATCH);
if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag))
return;
IWL_ERR(fwrt, "Function Scratch status:\n");
IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch);
}
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
IWL_ERR(fwrt, "RCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2);
IWL_ERR(fwrt, "0x%08X | rcm interruptlink1\n", table.ilink1);
IWL_ERR(fwrt, "0x%08X | rcm interruptlink2\n", table.ilink2);
IWL_ERR(fwrt, "0x%08X | rcm data1\n", table.data1);
IWL_ERR(fwrt, "0x%08X | rcm data2\n", table.data2);
IWL_ERR(fwrt, "0x%08X | rcm data3\n", table.data3);
IWL_ERR(fwrt, "0x%08X | rcm log PC\n", table.logpc);
IWL_ERR(fwrt, "0x%08X | rcm frame pointer\n", table.frame_pointer);
IWL_ERR(fwrt, "0x%08X | rcm stack pointer\n", table.stack_pointer);
IWL_ERR(fwrt, "0x%08X | rcm msg ID\n", table.msgid);
IWL_ERR(fwrt, "0x%08X | rcm ISR status\n", table.isr);
IWL_ERR(fwrt, "0x%08X | frame HW status\n", table.frame_hw_status);
IWL_ERR(fwrt, "0x%08X | LMAC-to-RCM request mbox\n",
table.mbx_lmac_to_rcm_req);
IWL_ERR(fwrt, "0x%08X | RCM-to-LMAC request mbox\n",
table.mbx_rcm_to_lmac_req);
IWL_ERR(fwrt, "0x%08X | MAC header control\n", table.mh_ctl);
IWL_ERR(fwrt, "0x%08X | MAC header addr1 low\n", table.mh_addr1_lo);
IWL_ERR(fwrt, "0x%08X | MAC header info\n", table.mh_info);
IWL_ERR(fwrt, "0x%08X | MAC header error\n", table.mh_err);
}
static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
......@@ -420,8 +441,18 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
if (fwrt->trans->dbg.lmac_error_event_table[1])
iwl_fwrt_dump_lmac_error_log(fwrt, 1);
iwl_fwrt_dump_umac_error_log(fwrt);
iwl_fwrt_dump_tcm_error_log(fwrt);
iwl_fwrt_dump_tcm_error_log(fwrt, 0);
iwl_fwrt_dump_rcm_error_log(fwrt, 0);
iwl_fwrt_dump_tcm_error_log(fwrt, 1);
iwl_fwrt_dump_rcm_error_log(fwrt, 1);
iwl_fwrt_dump_iml_error_log(fwrt);
iwl_fwrt_dump_fseq_regs(fwrt);
if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
u32 scratch = iwl_read32(fwrt->trans, CSR_FUNC_SCRATCH);
IWL_ERR(fwrt, "Function Scratch status:\n");
IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch);
}
}
IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs);
......@@ -98,7 +98,6 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_PNVM_VERSION = 62,
IWL_UCODE_TLV_PNVM_SKU = 64,
IWL_UCODE_TLV_TCM_DEBUG_ADDRS = 65,
IWL_UCODE_TLV_SEC_TABLE_ADDR = 66,
IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67,
......
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2019 - 2020 Intel Corporation
* Copyright(c) 2019 - 2021 Intel Corporation
*/
#include "img.h"
......@@ -49,10 +49,9 @@ u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
}
EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver);
#define FW_SYSASSERT_CPU_MASK 0xf0000000
static const struct {
const char *name;
u8 num;
u32 num;
} advanced_lookup[] = {
{ "NMI_INTERRUPT_WDG", 0x34 },
{ "SYSASSERT", 0x35 },
......@@ -73,6 +72,7 @@ static const struct {
{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
{ "PNVM_MISSING", FW_SYSASSERT_PNVM_MISSING },
{ "ADVANCED_SYSASSERT", 0 },
};
......
......@@ -279,4 +279,8 @@ u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
const char *iwl_fw_lookup_assert_desc(u32 num);
#define FW_SYSASSERT_CPU_MASK 0xf0000000
#define FW_SYSASSERT_PNVM_MISSING 0x0010070d
#endif /* __iwl_fw_img_h__ */
......@@ -158,7 +158,8 @@ struct iwl_fw_runtime {
u32 geo_rev;
u32 geo_num_profiles;
bool geo_enabled;
union iwl_ppag_table_cmd ppag_table;
struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
u32 ppag_flags;
u32 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
......
......@@ -84,6 +84,10 @@ enum iwl_nvm_type {
#define IWL_DEFAULT_MAX_TX_POWER 22
#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
NETIF_F_TSO | NETIF_F_TSO6)
#define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6)
#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \
IWL_TX_CSUM_NETIF_FLAGS_BZ | \
NETIF_F_RXCSUM)
/* Antenna presence definitions */
#define ANT_NONE 0x0
......@@ -448,6 +452,9 @@ struct iwl_cfg {
#define IWL_CFG_NO_CDB 0x0
#define IWL_CFG_CDB 0x1
#define IWL_CFG_NO_JACKET 0x0
#define IWL_CFG_IS_JACKET 0x1
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
......@@ -462,6 +469,7 @@ struct iwl_dev_info {
u8 no_160;
u8 cores;
u8 cdb;
u8 jacket;
const struct iwl_cfg *cfg;
const char *name;
};
......@@ -634,6 +642,11 @@ extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_bz_z0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
......@@ -109,9 +109,10 @@
#define CSR_IPC_SLEEP_CONTROL_SUSPEND 0x3
#define CSR_IPC_SLEEP_CONTROL_RESUME 0
/* Doorbell NMI (since Bz) */
/* Doorbell - since Bz
* connected to UREG_DOORBELL_TO_ISR6 (lower 16 bits only)
*/
#define CSR_DOORBELL_VECTOR (CSR_BASE + 0x130)
#define CSR_DOORBELL_VECTOR_NMI BIT(1)
/* host chicken bits */
#define CSR_HOST_CHICKEN (CSR_BASE + 0x204)
......
......@@ -233,6 +233,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
struct iwl_fw_ini_trigger_tlv *dup_trig;
u32 tp = le32_to_cpu(trig->time_point);
u32 rf = le32_to_cpu(trig->reset_fw);
struct iwl_ucode_tlv *dup = NULL;
int ret;
......@@ -247,6 +248,10 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
return -EINVAL;
}
IWL_DEBUG_FW(trans,
"WRT: time point %u for trigger TLV with reset_fw %u\n",
tp, rf);
trans->dbg.last_tp_resetfw = 0xFF;
if (!le32_to_cpu(trig->occurrences)) {
dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
GFP_KERNEL);
......@@ -300,14 +305,21 @@ static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
bool ext)
{
const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
u32 domain = le32_to_cpu(hdr->domain);
enum iwl_ini_cfg_state *cfg_state = ext ?
&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
u32 type;
u32 tlv_idx;
u32 domain;
int ret;
if (le32_to_cpu(tlv->length) < sizeof(*hdr))
return;
type = le32_to_cpu(tlv->type);
tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
domain = le32_to_cpu(hdr->domain);
if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
!(domain & trans->dbg.domains_bitmap)) {
IWL_DEBUG_FW(trans,
......@@ -1159,6 +1171,8 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
data);
int ret, i;
u32 tp = le32_to_cpu(dump_data.trig->time_point);
if (!num_data) {
ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
......@@ -1177,8 +1191,42 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
break;
}
}
}
fwrt->trans->dbg.restart_required = FALSE;
IWL_DEBUG_INFO(fwrt, "WRT: tp %d, reset_fw %d\n",
tp, dump_data.trig->reset_fw);
IWL_DEBUG_INFO(fwrt, "WRT: restart_required %d, last_tp_resetfw %d\n",
fwrt->trans->dbg.restart_required,
fwrt->trans->dbg.last_tp_resetfw);
if (fwrt->trans->trans_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
fwrt->trans->dbg.restart_required = TRUE;
} else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
fwrt->trans->dbg.last_tp_resetfw ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
fwrt->trans->dbg.restart_required = FALSE;
fwrt->trans->dbg.last_tp_resetfw = 0xFF;
IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
IWL_DEBUG_INFO(fwrt, "WRT: stop and reload firmware\n");
fwrt->trans->dbg.restart_required = TRUE;
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
IWL_DEBUG_INFO(fwrt, "WRT: stop only and no reload firmware\n");
fwrt->trans->dbg.restart_required = FALSE;
fwrt->trans->dbg.last_tp_resetfw =
le32_to_cpu(dump_data.trig->reset_fw);
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_NOTHING) {
IWL_DEBUG_INFO(fwrt,
"WRT: nothing need to be done after debug collection\n");
} else {
IWL_ERR(fwrt, "WRT: wrong resetfw %d\n",
le32_to_cpu(dump_data.trig->reset_fw));
}
}
return 0;
}
......
......@@ -130,6 +130,9 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
/* clear the data for the aborted load case */
memset(&drv->fw, 0, sizeof(drv->fw));
}
static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
......@@ -586,6 +589,66 @@ static void iwl_drv_set_dump_exclude(struct iwl_drv *drv,
excl->size = le32_to_cpu(fw->size);
}
static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv,
const struct iwl_ucode_tlv *tlv)
{
const struct iwl_fw_ini_region_tlv *region;
u32 length = le32_to_cpu(tlv->length);
u32 addr;
if (length < offsetof(typeof(*region), special_mem) +
sizeof(region->special_mem))
return;
region = (void *)tlv->data;
addr = le32_to_cpu(region->special_mem.base_addr);
addr += le32_to_cpu(region->special_mem.offset);
addr &= ~FW_ADDR_CACHE_CONTROL;
if (region->type != IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY)
return;
switch (region->sub_type) {
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE:
drv->trans->dbg.umac_error_event_table = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_UMAC;
break;
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE:
drv->trans->dbg.lmac_error_event_table[0] = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_LMAC1;
break;
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE:
drv->trans->dbg.lmac_error_event_table[1] = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_LMAC2;
break;
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE:
drv->trans->dbg.tcm_error_event_table[0] = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_TCM1;
break;
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE:
drv->trans->dbg.tcm_error_event_table[1] = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_TCM2;
break;
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE:
drv->trans->dbg.rcm_error_event_table[0] = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_RCM1;
break;
case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE:
drv->trans->dbg.rcm_error_event_table[1] = addr;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_RCM2;
break;
default:
break;
}
}
static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces,
......@@ -1153,21 +1216,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
IWL_ERROR_EVENT_TABLE_LMAC1;
break;
}
case IWL_UCODE_TLV_TCM_DEBUG_ADDRS: {
struct iwl_fw_tcm_error_addr *ptr = (void *)tlv_data;
if (tlv_len != sizeof(*ptr))
goto invalid_tlv_len;
drv->trans->dbg.tcm_error_event_table =
le32_to_cpu(ptr->addr) & ~FW_ADDR_CACHE_CONTROL;
drv->trans->dbg.error_event_table_tlv_status |=
IWL_ERROR_EVENT_TABLE_TCM;
break;
}
case IWL_UCODE_TLV_TYPE_REGIONS:
iwl_parse_dbg_tlv_assert_tables(drv, tlv);
fallthrough;
case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
case IWL_UCODE_TLV_TYPE_HCMD:
case IWL_UCODE_TLV_TYPE_REGIONS:
case IWL_UCODE_TLV_TYPE_TRIGGERS:
case IWL_UCODE_TLV_TYPE_CONF_SET:
if (iwlwifi_mod_params.enable_ini)
......@@ -1375,6 +1429,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
int i;
bool load_module = false;
bool usniffer_images = false;
bool failure = true;
fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
fw->ucode_capa.standard_phy_calibration_size =
......@@ -1635,15 +1690,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* else from proceeding if the module fails to load
* or hangs loading.
*/
if (load_module) {
if (load_module)
request_module("%s", op->name);
#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
if (err)
IWL_ERR(drv,
"failed to load module %s (error %d), is dynamic loading enabled?\n",
op->name, err);
#endif
}
failure = false;
goto free;
try_again:
......@@ -1659,6 +1708,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
free:
if (failure)
iwl_dealloc_ucode(drv);
if (pieces) {
for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
kfree(pieces->img[i].sec);
......
......@@ -10,6 +10,7 @@
#include "iwl-modparams.h"
#include "iwl-eeprom-parse.h"
#if IS_ENABLED(CONFIG_IWLDVM)
/* EEPROM offset definitions */
/* indirect access definitions */
......@@ -647,6 +648,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
return n_channels;
}
#endif
int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
......@@ -750,6 +752,7 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
}
}
#if IS_ENABLED(CONFIG_IWLDVM)
static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size)
......@@ -873,3 +876,4 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return NULL;
}
IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
#endif
......@@ -580,7 +580,7 @@ struct iwl_rb_status {
__le16 closed_fr_num;
__le16 finished_rb_num;
__le16 finished_fr_nam;
__le32 __unused;
__le32 __spare;
} __packed;
......
......@@ -218,7 +218,7 @@ void iwl_force_nmi(struct iwl_trans *trans)
UREG_DOORBELL_TO_ISR6_NMI_BIT);
else
iwl_write32(trans, CSR_DOORBELL_VECTOR,
CSR_DOORBELL_VECTOR_NMI);
UREG_DOORBELL_TO_ISR6_NMI_BIT);
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);
......
......@@ -347,9 +347,7 @@
#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078
#define RFIC_REG_RD 0xAD0470
#define WFPM_CTRL_REG 0xA03030
#define WFPM_CTRL_REG_GEN2 0xd03030
#define WFPM_OTP_CFG1_ADDR 0x00a03098
#define WFPM_OTP_CFG1_ADDR_GEN2 0x00d03098
#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4)
#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5)
......
......@@ -193,7 +193,10 @@ enum iwl_error_event_table_status {
IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
IWL_ERROR_EVENT_TABLE_TCM = BIT(3),
IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
};
/**
......@@ -728,7 +731,8 @@ struct iwl_self_init_dram {
* @trigger_tlv: array of pointers to triggers TLVs for debug
* @lmac_error_event_table: addrs of lmacs error tables
* @umac_error_event_table: addr of umac error table
* @tcm_error_event_table: address of TCM error table
* @tcm_error_event_table: address(es) of TCM error table(s)
* @rcm_error_event_table: address(es) of RCM error table(s)
* @error_event_table_tlv_status: bitmap that indicates what error table
* pointers was recevied via TLV. uses enum &iwl_error_event_table_status
* @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
......@@ -755,7 +759,8 @@ struct iwl_trans_debug {
u32 lmac_error_event_table[2];
u32 umac_error_event_table;
u32 tcm_error_event_table;
u32 tcm_error_event_table[2];
u32 rcm_error_event_table[2];
unsigned int error_event_table_tlv_status;
enum iwl_ini_cfg_state internal_ini_cfg;
......@@ -778,6 +783,8 @@ struct iwl_trans_debug {
u32 domains_bitmap;
u32 ucode_preset;
bool restart_required;
u32 last_tp_resetfw;
};
struct iwl_dma_ptr {
......
......@@ -1705,6 +1705,7 @@ void iwl_mei_unregister_complete(void)
mei_cldev_get_drvdata(iwl_mei_global_cldev);
iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN);
mei->got_ownership = false;
}
mutex_unlock(&iwl_mei_mutex);
......@@ -1811,6 +1812,12 @@ static int iwl_mei_probe(struct mei_cl_device *cldev,
mei_cldev_set_drvdata(cldev, mei);
mei->cldev = cldev;
/*
* The CSME firmware needs to boot the internal WLAN client. Wait here
* so that the DMA map request will succeed.
*/
msleep(20);
ret = iwl_mei_alloc_shared_mem(cldev);
if (ret)
goto free;
......
......@@ -107,7 +107,7 @@
#define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT true
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20
#define IWL_MVM_USE_NSSN_SYNC 0
#define IWL_MVM_PHY_FILTER_CHAIN_A 0
#define IWL_MVM_PHY_FILTER_CHAIN_B 0
......
......@@ -511,7 +511,7 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
if (sta->mfp)
if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
FTM_PUT_FLAG(PMF);
rcu_read_unlock();
......@@ -1066,7 +1066,8 @@ static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;
rtt_avg = (alpha * rtt + (100 - alpha) * resp->rtt_avg) / 100;
rtt_avg = alpha * rtt + (100 - alpha) * resp->rtt_avg;
do_div(rtt_avg, 100);
IWL_DEBUG_INFO(mvm,
"%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
......
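The rtt_avg change just above replaces an open-coded 64-bit division with do_div(); this appears to be the 32-bit compilation fix mentioned in the commit message, since a plain '/' on a 64-bit value needs compiler helper routines that 32-bit kernels do not link. A minimal standalone sketch of the pattern (names illustrative, not driver code):

#include <linux/types.h>
#include <asm/div64.h>	/* do_div() */

/*
 * Illustrative only: do_div() divides a 64-bit dividend in place by a
 * 32-bit divisor and returns the remainder, which is the portable way
 * to do this division on 32-bit kernels.
 */
static u64 example_weighted_avg(u64 sample, u64 prev_avg, u32 alpha)
{
	u64 avg = alpha * sample + (100 - alpha) * prev_avg;

	do_div(avg, 100);
	return avg;
}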
......@@ -641,14 +641,21 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
if (iwl_mvm_is_oce_supported(mvm)) {
u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
IWL_ALWAYS_LONG_GROUP,
SCAN_REQ_UMAC, 0);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
/* Old firmware also supports probe deferral and suppression */
if (scan_ver < 15)
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
}
if (mvm->nvm_data->sku_cap_11ax_enable &&
......@@ -710,8 +717,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->netdev_features |= mvm->cfg->features;
if (!iwl_mvm_is_csum_supported(mvm))
hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
NETIF_F_RXCSUM);
hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK;
if (mvm->cfg->vht_mu_mimo_supported)
wiphy_ext_feature_set(hw->wiphy,
......@@ -5517,6 +5523,10 @@ static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
return iwl_mvm_tx_csum_bz(mvm, head, true) ==
iwl_mvm_tx_csum_bz(mvm, skb, true);
/* For now don't aggregate IPv6 in AMSDU */
if (skb->protocol != htons(ETH_P_IP))
return false;
......
......@@ -1518,6 +1518,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
unsigned int tid);
u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu);
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status);
......@@ -1623,8 +1624,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
......
......@@ -332,9 +332,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
struct iwl_umac_scan_iter_complete_notif),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
RX_HANDLER_SYNC, struct iwl_card_state_notif),
RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
RX_HANDLER_SYNC, struct iwl_missed_beacons_notif),
......@@ -455,7 +452,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(STATISTICS_NOTIFICATION),
HCMD_NAME(EOSP_NOTIFICATION),
HCMD_NAME(REDUCE_TX_POWER_CMD),
HCMD_NAME(CARD_STATE_NOTIFICATION),
HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
HCMD_NAME(TDLS_CONFIG_CMD),
HCMD_NAME(MAC_PM_POWER_TABLE),
......@@ -1306,12 +1302,18 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops);
/*
* Get NVM failed, but we are registered to MEI, we'll get
* the NVM later when it'll be possible to get it from CSME.
*/
if (iwl_mvm_start_get_nvm(mvm) && mvm->mei_registered)
return op_mode;
if (iwl_mvm_start_get_nvm(mvm)) {
/*
* Getting NVM failed while CSME is the owner, but we are
* registered to MEI, we'll get the NVM later when it'll be
* possible to get it from CSME.
*/
if (trans->csme_own && mvm->mei_registered)
return op_mode;
goto out_thermal_exit;
}
if (iwl_mvm_start_post_nvm(mvm))
goto out_thermal_exit;
......@@ -1839,9 +1841,16 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
iwl_fw_error_collect(&mvm->fwrt, false);
if (fw_error && mvm->fw_restart > 0)
if (fw_error && mvm->fw_restart > 0) {
mvm->fw_restart--;
ieee80211_restart_hw(mvm->hw);
ieee80211_restart_hw(mvm->hw);
} else if (mvm->fwrt.trans->dbg.restart_required) {
IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
mvm->fwrt.trans->dbg.restart_required = FALSE;
ieee80211_restart_hw(mvm->hw);
} else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
ieee80211_restart_hw(mvm->hw);
}
}
}
......@@ -1872,7 +1881,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
return;
iwl_mvm_nic_restart(mvm, true);
iwl_mvm_nic_restart(mvm, false);
}
static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
......@@ -1921,6 +1930,9 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
if (unlikely(queue >= mvm->trans->num_rx_queues))
return;
if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
......
......@@ -12,34 +12,52 @@
* frequency values in the adjusted format.
*/
static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = {
/* LPDDR4 */
/* frequency 2667MHz */
{cpu_to_le16(160), {50, 58, 60, 62, 64, 52, 54, 56},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
/* frequency 2933MHz */
{cpu_to_le16(176), {149, 151, 153, 157, 159, 161, 165, 163, 167, 169,
171, 173, 175},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
/* frequency 3200MHz */
{cpu_to_le16(192), {79, 81, 83, 85, 87, 89, 91, 93},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}},
/* frequency 3733MHz */
{cpu_to_le16(223), {114, 116, 118, 120, 122,},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
{cpu_to_le16(223), {114, 116, 118, 120, 122, 106, 110, 124, 126},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
/* frequency 4000MHz */
{cpu_to_le16(240), {114, 151, 155, 157, 159, 161, 165},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5,}},
/* frequency 4267MHz */
{cpu_to_le16(256), {79, 83, 85, 87, 89, 91, 93,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6,}},
/* DDR5ePOR */
/* frequency 4000MHz */
{cpu_to_le16(240), {3, 5, 7, 9, 11, 13, 15,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6,}},
/* frequency 4400MHz */
{cpu_to_le16(264), {111, 119, 123, 125, 129, 131, 133, 135, 143,},
{PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}},
/* LPDDR5iPOR */
/* frequency 5200MHz */
{cpu_to_le16(312), {36, 38, 40, 42, 50,},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
{cpu_to_le16(312), {36, 38, 40, 42, 44, 46, 50,},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5,}},
/* frequency 5600MHz */
{cpu_to_le16(336), {106, 110, 112, 114, 116, 118, 120, 122},
{PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
/* frequency 6000MHz */
{cpu_to_le16(360), {3, 5, 7, 9, 11, 13, 15,},
......
......@@ -129,7 +129,7 @@ int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap,
static void
rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
const struct ieee80211_sta_vht_cap *vht_cap,
struct iwl_tlc_config_cmd *cmd)
struct iwl_tlc_config_cmd_v4 *cmd)
{
u16 supp;
int i, highest_mcs;
......@@ -154,7 +154,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(supp);
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
/*
* Check if VHT extended NSS indicates that the bandwidth/NSS
* configuration is supported - only for MCS 0 since we already
......@@ -164,8 +164,8 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
ieee80211_get_vht_max_nss(&ieee_vht_cap,
IEEE80211_VHT_CHANWIDTH_160MHZ,
0, true, nss) >= nss)
cmd->ht_rates[i][IWL_TLC_HT_BW_160] =
cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160];
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80];
}
}
......@@ -189,7 +189,7 @@ static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
static void
rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd *cmd)
struct iwl_tlc_config_cmd_v4 *cmd)
{
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
......@@ -219,7 +219,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
}
if (_mcs_80 > _tx_mcs_80)
_mcs_80 = _tx_mcs_80;
cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] =
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
/* If one side doesn't support - mark both as not supporting */
......@@ -230,14 +230,14 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
}
if (_mcs_160 > _tx_mcs_160)
_mcs_160 = _tx_mcs_160;
cmd->ht_rates[i][IWL_TLC_HT_BW_160] =
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
}
}
static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd *cmd)
struct iwl_tlc_config_cmd_v4 *cmd)
{
int i;
u16 supp = 0;
......@@ -263,15 +263,15 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
} else if (ht_cap->ht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_HT;
cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] =
cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
cpu_to_le16(ht_cap->mcs.rx_mask[0]);
/* the station supports only a single receive chain */
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
0;
else
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
cpu_to_le16(ht_cap->mcs.rx_mask[1]);
}
}
......@@ -315,18 +315,19 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
char pretty_rate[100];
if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
TLC_MNG_UPDATE_NOTIF, 0) < 3) {
rs_pretty_print_rate_v1(pretty_rate, sizeof(pretty_rate),
le32_to_cpu(notif->rate));
IWL_DEBUG_RATE(mvm,
"Got rate in old format. Rate: %s. Converting.\n",
pretty_rate);
lq_sta->last_rate_n_flags =
iwl_new_rate_from_v1(le32_to_cpu(notif->rate));
} else {
lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
}
if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
TLC_MNG_UPDATE_NOTIF, 0) < 3) {
rs_pretty_print_rate_v1(pretty_rate,
sizeof(pretty_rate),
le32_to_cpu(notif->rate));
IWL_DEBUG_RATE(mvm,
"Got rate in old format. Rate: %s. Converting.\n",
pretty_rate);
lq_sta->last_rate_n_flags =
iwl_new_rate_from_v1(le32_to_cpu(notif->rate));
} else {
lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
}
rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
lq_sta->last_rate_n_flags);
IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate);
......@@ -422,23 +423,18 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
struct iwl_tlc_config_cmd cfg_cmd = {
struct iwl_tlc_config_cmd_v4 cfg_cmd = {
.sta_id = mvmsta->sta_id,
.max_ch_width = update ?
rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
.flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)),
.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
.max_mpdu_len = cpu_to_le16(max_amsdu_len),
.amsdu = iwl_mvm_is_csum_supported(mvm),
.max_mpdu_len = iwl_mvm_is_csum_supported(mvm) ?
cpu_to_le16(max_amsdu_len) : 0,
};
int ret;
u16 cmd_size = sizeof(cfg_cmd);
/* In old versions of the API the struct is 4 bytes smaller */
if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
TLC_MNG_CONFIG_CMD, 0) < 3)
cmd_size -= 4;
int cmd_ver;
memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
......@@ -453,8 +449,41 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
sta->max_amsdu_len = max_amsdu_len;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
&cfg_cmd);
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
TLC_MNG_CONFIG_CMD, 0);
if (cmd_ver == 4) {
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC,
sizeof(cfg_cmd), &cfg_cmd);
} else if (cmd_ver < 4) {
struct iwl_tlc_config_cmd_v3 cfg_cmd_v3 = {
.sta_id = cfg_cmd.sta_id,
.max_ch_width = cfg_cmd.max_ch_width,
.mode = cfg_cmd.mode,
.chains = cfg_cmd.chains,
.amsdu = !!cfg_cmd.max_mpdu_len,
.flags = cfg_cmd.flags,
.non_ht_rates = cfg_cmd.non_ht_rates,
.ht_rates[0][0] = cfg_cmd.ht_rates[0][0],
.ht_rates[0][1] = cfg_cmd.ht_rates[0][1],
.ht_rates[1][0] = cfg_cmd.ht_rates[1][0],
.ht_rates[1][1] = cfg_cmd.ht_rates[1][1],
.sgi_ch_width_supp = cfg_cmd.sgi_ch_width_supp,
.max_mpdu_len = cfg_cmd.max_mpdu_len,
};
u16 cmd_size = sizeof(cfg_cmd_v3);
/* In old versions of the API the struct is 4 bytes smaller */
if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
TLC_MNG_CONFIG_CMD, 0) < 3)
cmd_size -= 4;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
&cfg_cmd_v3);
} else {
ret = -EINVAL;
}
if (ret)
IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
}
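The fallback above follows the driver's usual pattern: build the newest command layout, then repack it when the firmware advertises an older TLC_MNG_CONFIG_CMD version. A compact standalone sketch of that dispatch, where every struct and function name is a hypothetical stand-in rather than a real iwlwifi type:
#include <stddef.h>
#include <stdint.h>
/* Hypothetical stand-ins for the two firmware command layouts. */
struct cfg_cmd_v4 { uint8_t sta_id; uint16_t max_mpdu_len; uint16_t flags; };
struct cfg_cmd_v3 { uint8_t sta_id; uint8_t amsdu; uint16_t max_mpdu_len; uint16_t flags; };
/* Placeholder for the driver's async command helper. */
static int send_cmd(const void *payload, size_t len) { (void)payload; (void)len; return 0; }
/* Pick the payload layout according to the version the firmware advertises. */
static int send_cfg(int cmd_ver, const struct cfg_cmd_v4 *v4)
{
	if (cmd_ver == 4)
		return send_cmd(v4, sizeof(*v4));
	if (cmd_ver < 4) {
		struct cfg_cmd_v3 v3 = {
			.sta_id = v4->sta_id,
			.amsdu = v4->max_mpdu_len != 0,	/* v3 keeps a separate amsdu flag */
			.max_mpdu_len = v4->max_mpdu_len,
			.flags = v4->flags,
		};
		size_t len = sizeof(v3);
		if (cmd_ver < 3)
			len -= 4;	/* very old API: struct is 4 bytes smaller */
		return send_cmd(&v3, len);
	}
	return -1;	/* newer than this driver knows about */
}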
......
......@@ -121,12 +121,39 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
unsigned int headlen, fraglen, pad_len = 0;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;
if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
len -= 2;
pad_len = 2;
}
/*
* For a non-monitor interface, strip the bytes that the RADA might not
* have removed. Since a monitor interface cannot coexist with other
* interfaces, this removal is safe.
*/
if (mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) {
u32 pkt_flags = le32_to_cpu(pkt->len_n_flags);
/*
* If RADA was not enabled then decryption was not performed so
* the MIC cannot be removed.
*/
if (!(pkt_flags & FH_RSCSR_RADA_EN)) {
if (WARN_ON(crypt_len > mic_crc_len))
return -EINVAL;
mic_crc_len -= crypt_len;
}
if (WARN_ON(mic_crc_len > len))
return -EINVAL;
len -= mic_crc_len;
}
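A small standalone illustration of the trailer-length arithmetic in the block above; the lengths are hypothetical, and only the arithmetic (2-byte units in the mac_flags1 field, the adjustment when RADA is off, the final subtraction) mirrors the hunk:
#include <stdio.h>
int main(void)
{
	unsigned int len = 1500;		/* MPDU length, hypothetical */
	unsigned int crypt_len = 8;		/* crypto header length, hypothetical */
	unsigned int mic_crc_field = 6;		/* MIC_CRC_LEN bits from mac_flags1, hypothetical */
	unsigned int mic_crc_len = mic_crc_field << 1;	/* the field is in 2-byte units */
	int rada_enabled = 0;			/* FH_RSCSR_RADA_EN clear in this example */
	if (!rada_enabled) {
		/* decryption was not performed, so strip crypt_len fewer bytes */
		if (crypt_len > mic_crc_len)
			return 1;		/* mirrors the WARN_ON + -EINVAL path */
		mic_crc_len -= crypt_len;
	}
	if (mic_crc_len > len)
		return 1;
	len -= mic_crc_len;
	printf("length after stripping the MIC/CRC trailer: %u\n", len);
	return 0;
}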
/* If frame is small enough to fit in skb->head, pull it completely.
* If not, only pull ieee80211_hdr (including crypto if present, and
* an additional 8 bytes for SNAP/ethertype, see below) so that
......@@ -149,18 +176,8 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
*/
hdrlen += crypt_len;
if (WARN_ONCE(headlen < hdrlen,
"invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
hdrlen, len, crypt_len)) {
/*
* We warn and trace because we want to be able to see
* it in trace-cmd as well.
*/
IWL_DEBUG_RX(mvm,
"invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
hdrlen, len, crypt_len);
if (unlikely(headlen < hdrlen))
return -EINVAL;
}
skb_put_data(skb, hdr, hdrlen);
skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
......@@ -172,8 +189,12 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
* in the cases the hardware didn't handle, since it's rare to see
* such packets, even though the hardware did calculate the checksum
* in this case, just starting after the MAC header instead.
*
* Starting from Bz hardware, it calculates starting directly after
* the MAC header, so that matches mac80211's expectation.
*/
if (skb->ip_summed == CHECKSUM_COMPLETE) {
if (skb->ip_summed == CHECKSUM_COMPLETE &&
mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) {
struct {
u8 hdr[6];
__be16 type;
......@@ -1964,8 +1985,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
} else if (format == RATE_MCS_VHT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
rx_status->nss =
((rate_n_flags & RATE_MCS_NSS_MSK) >>
rx_status->nss = ((rate_n_flags & RATE_MCS_NSS_MSK) >>
RATE_MCS_NSS_POS) + 1;
rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
rx_status->encoding = RX_ENC_VHT;
......
......@@ -579,7 +579,9 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
profile->ssid_index = i;
/* Support any cipher and auth algorithm */
profile->unicast_cipher = 0xff;
profile->auth_alg = 0xff;
profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED |
IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK | IWL_AUTH_ALGO_8021X |
IWL_AUTH_ALGO_SAE | IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE;
profile->network_type = IWL_NETWORK_TYPE_ANY;
profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
......@@ -1826,8 +1828,6 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
}
}
flags = bssid_bitmap | (s_ssid_bitmap << 16);
if (cfg80211_channel_is_psc(params->channels[i]) &&
psc_no_listen)
flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN;
......@@ -1869,8 +1869,11 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
(s_max > 1 || b_max > 3));
}
if ((allow_passive && force_passive) ||
(!flags && !cfg80211_channel_is_psc(params->channels[i])))
(!(bssid_bitmap | s_ssid_bitmap) &&
!cfg80211_channel_is_psc(params->channels[i])))
flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;
else
flags |= bssid_bitmap | (s_ssid_bitmap << 16);
channel_cfg[i].flags |= cpu_to_le32(flags);
}
......@@ -1924,22 +1927,19 @@ static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm,
}
/*
* 6GHz passive scan is allowed while associated in a defined time
* interval following HW reset or resume flow
* 6GHz passive scan is allowed in a defined time interval following HW
* reset or resume flow, or while not associated and a large interval
* has passed since the last 6GHz passive scan.
*/
if (vif->bss_conf.assoc &&
if ((vif->bss_conf.assoc ||
time_after(mvm->last_6ghz_passive_scan_jiffies +
(IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
(time_before(mvm->last_reset_or_resume_time_jiffies +
(IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
jiffies))) {
IWL_DEBUG_SCAN(mvm, "6GHz passive scan: associated\n");
return;
}
/* No need for 6GHz passive scan if not enough time elapsed */
if (time_after(mvm->last_6ghz_passive_scan_jiffies +
(IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) {
IWL_DEBUG_SCAN(mvm,
"6GHz passive scan: timeout did not expire\n");
IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n",
vif->bss_conf.assoc ? "associated" :
"timeout did not expire");
return;
}
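For readability, the combined skip condition above can be restated with named booleans. A minimal standalone sketch, with the two jiffies helpers re-declared locally and every other name being hypothetical:
#include <stdbool.h>
/* Local re-declarations of the wrap-safe jiffies comparisons. */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before(a, b)	time_after(b, a)
static bool skip_6ghz_passive_scan(unsigned long now, bool assoc,
				   unsigned long last_scan,
				   unsigned long last_reset_or_resume,
				   unsigned long scan_timeout,
				   unsigned long assoc_timeout)
{
	/* the post-reset/resume window in which the scan is allowed has expired */
	bool reset_window_expired =
		time_before(last_reset_or_resume + assoc_timeout, now);
	/* not enough time has passed since the previous 6 GHz passive scan */
	bool scan_interval_pending =
		time_after(last_scan + scan_timeout, now);
	return (assoc || scan_interval_pending) && reset_window_expired;
}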
......@@ -2037,6 +2037,12 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
if (params->enable_6ghz_passive)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN;
if (iwl_mvm_is_oce_supported(mvm) &&
(params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP |
NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE |
NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE;
return flags;
}
......@@ -2513,7 +2519,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
return -EIO;
}
#define SCAN_TIMEOUT 20000
#define SCAN_TIMEOUT 30000
void iwl_mvm_scan_timeout_wk(struct work_struct *work)
{
......
......@@ -49,14 +49,13 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
/*
* Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
* Clear the ROC_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
* in the case that the time event actually completed in the firmware
* (which is handled in iwl_mvm_te_handle_notif).
*/
clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
synchronize_net();
......@@ -82,9 +81,19 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
}
} else {
}
/*
* Clear the ROC_AUX_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
* in the case that the time event actually completed in the firmware
* (which is handled in iwl_mvm_te_handle_notif).
*/
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
/* do the same in case of hot spot 2.0 */
iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
/* In newer versions of this command an aux station is added only
* when a dedicated TX queue is used, and it needs to be removed at
* the end of its use */
......@@ -687,11 +696,14 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
iwl_mvm_te_clear_data(mvm, te_data);
spin_unlock_bh(&mvm->time_event_lock);
/* When session protection is supported, the te_data->id field
/* When session protection is used, the te_data->id field
* is reused to save session protection's configuration.
* For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set
* to HOT_SPOT_CMD.
*/
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
id != HOT_SPOT_CMD) {
if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
/* Session protection is still ongoing. Cancel it */
iwl_mvm_cancel_session_protection(mvm, mvmvif, id);
......@@ -1027,7 +1039,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_p2p_roc_finished(mvm);
} else {
iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
&mvmvif->time_event_data);
&mvmvif->hs_time_event_data);
iwl_mvm_roc_finished(mvm);
}
......
......@@ -39,11 +39,11 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
#define OPT_HDR(type, skb, off) \
(type *)(skb_network_header(skb) + (off))
static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info,
u16 offload_assist)
static u16 iwl_mvm_tx_csum_pre_bz(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info, bool amsdu)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
u16 offload_assist = 0;
#if IS_ENABLED(CONFIG_INET)
u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
u8 protocol = 0;
......@@ -106,8 +106,7 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
/* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */
if (skb->protocol == htons(ETH_P_IP) &&
(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
if (skb->protocol == htons(ETH_P_IP) && amsdu) {
ip_hdr(skb)->check = 0;
offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
}
......@@ -132,9 +131,63 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
out:
#endif
if (amsdu)
offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
else if (ieee80211_hdrlen(hdr->frame_control) % 4)
/* padding is inserted later in transport */
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
return offload_assist;
}
u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
u32 offload_assist = IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
unsigned int csum_start = skb_checksum_start_offset(skb);
offload_assist |= u32_encode_bits(hdrlen / 2,
IWL_TX_CMD_OFFLD_BZ_MH_LEN);
if (amsdu)
offload_assist |= IWL_TX_CMD_OFFLD_BZ_AMSDU;
else if (hdrlen % 4)
/* padding is inserted later in transport */
offload_assist |= IWL_TX_CMD_OFFLD_BZ_MH_PAD;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return offload_assist;
offload_assist |= IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM |
IWL_TX_CMD_OFFLD_BZ_ZERO2ONES;
/*
* mac80211 will always calculate checksum in software for
* non-fast-xmit, and so we can only do offloaded checksum
* for fast-xmit frames. In this case, we always have the
* RFC 1042 header present. skb_checksum_start_offset()
* returns the offset from the beginning, but the hardware
* needs it from after the header & SNAP header.
*/
csum_start -= hdrlen + 8;
offload_assist |= u32_encode_bits(csum_start,
IWL_TX_CMD_OFFLD_BZ_START_OFFS);
offload_assist |= u32_encode_bits(csum_start + skb->csum_offset,
IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS);
return offload_assist;
}
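As a worked example of the offset fields computed above, assume a hypothetical fast-xmit TCP/IPv4 frame with a 26-byte QoS MAC header, the 8-byte RFC 1042 header and a 20-byte IP header; the numbers are illustrative only:
#include <stdio.h>
int main(void)
{
	unsigned int hdrlen = 26;	/* ieee80211_hdrlen() for a QoS data frame */
	unsigned int snap_len = 8;	/* RFC 1042 LLC/SNAP + ethertype */
	unsigned int ip_hdr_len = 20;
	unsigned int csum_start = hdrlen + snap_len + ip_hdr_len;	/* skb_checksum_start_offset() */
	unsigned int csum_offset = 16;	/* checksum offset within the TCP header */
	/* the hardware wants the offset counted from after the MAC + SNAP headers */
	csum_start -= hdrlen + snap_len;
	printf("MH_LEN field:      %u\n", hdrlen / 2);			/* 13 */
	printf("START_OFFS field:  %u\n", csum_start);			/* 20 */
	printf("RESULT_OFFS field: %u\n", csum_start + csum_offset);	/* 36 */
	return 0;
}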
static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
bool amsdu)
{
if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
return iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu);
return iwl_mvm_tx_csum_bz(mvm, skb, amsdu);
}
/*
* Sets most of the Tx cmd's fields
*/
......@@ -146,7 +199,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
__le16 fc = hdr->frame_control;
u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
u32 len = skb->len + FCS_LEN;
u16 offload_assist = 0;
bool amsdu = false;
u8 ac;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
......@@ -166,8 +219,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
} else if (ieee80211_is_back_req(fc)) {
struct ieee80211_bar *bar = (void *)skb->data;
u16 control = le16_to_cpu(bar->control);
......@@ -234,14 +286,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx_cmd->sta_id = sta_id;
/* padding is inserted later in transport */
if (ieee80211_hdrlen(fc) % 4 &&
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
tx_cmd->offload_assist |=
cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info,
offload_assist));
tx_cmd->offload_assist =
cpu_to_le16(iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu));
}
static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
......@@ -463,27 +509,18 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
u16 offload_assist = 0;
u32 rate_n_flags = 0;
u16 flags = 0;
struct iwl_mvm_sta *mvmsta = sta ?
iwl_mvm_sta_from_mac80211(sta) : NULL;
bool amsdu = false;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info,
offload_assist);
/* padding is inserted later in transport */
if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
if (!info->control.hw_key)
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
......@@ -503,8 +540,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
if (mvm->trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210) {
struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
u32 offload_assist = iwl_mvm_tx_csum(mvm, skb,
info, amsdu);
cmd->offload_assist |= cpu_to_le32(offload_assist);
cmd->offload_assist = cpu_to_le32(offload_assist);
/* Total # bytes to be transmitted */
cmd->len = cpu_to_le16((u16)skb->len);
......@@ -516,8 +555,11 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
} else {
struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
u16 offload_assist = iwl_mvm_tx_csum_pre_bz(mvm, skb,
info,
amsdu);
cmd->offload_assist |= cpu_to_le16(offload_assist);
cmd->offload_assist = cpu_to_le16(offload_assist);
/* Total # bytes to be transmitted */
cmd->len = cpu_to_le16((u16)skb->len);
......
......@@ -2266,7 +2266,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
/*
* In some rare cases when the HW is in a bad state, we may
* get this interrupt too early, when prph_info is still NULL.
* So make sure that it's not NULL to prevent crashing.
*/
if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
u32 sleep_notif =
le32_to_cpu(trans_pcie->prph_info->sleep_notif);
if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
......