Commit 97b9b844 authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2016-03-30' of...

Merge tag 'iwlwifi-next-for-kalle-2016-03-30' of https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

* Support for Link Quality measurement (Aviya)
* Improvements in thermal (Chaya Rachel)
* Various cleanups (many people)
* Improvements in firmware error dump (Golan)
* More work on 9000 devices and MSI-X (Haim)
* Continuation of the Dynamic Queue Allocation work (Liad)
* Scan timeout to cope with buggy firmware (Luca)
* D0i3 improvements (Luca)
* Make the paging less memory hungry (Matti)
* 9000 new Rx path (Sara)
parents 4da46ceb 46167a8f
@@ -88,16 +88,6 @@ config IWLWIFI_BCAST_FILTERING
 	  If unsure, don't enable this option, as some programs might
 	  expect incoming broadcasts for their normal operations.
 
-config IWLWIFI_UAPSD
-	bool "enable U-APSD by default"
-	depends on IWLMVM
-	help
-	  Say Y here to enable U-APSD by default. This may cause
-	  interoperability problems with some APs, manifesting in lower than
-	  expected throughput due to those APs not enabling aggregation
-
-	  If unsure, say N.
-
 config IWLWIFI_PCIE_RTPM
 	bool "Enable runtime power management mode for PCIe devices"
 	depends on IWLMVM && PM
...
@@ -1071,7 +1071,7 @@ static void iwl_bg_restart(struct work_struct *data)
 static void iwl_setup_deferred_work(struct iwl_priv *priv)
 {
-	priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+	priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0);
 
 	INIT_WORK(&priv->restart, iwl_bg_restart);
 	INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
...
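Editor's note on the API swap in this hunk: alloc_ordered_workqueue() gives the same one-item-at-a-time, FIFO execution guarantee that create_singlethread_workqueue() provided, while letting the kernel share worker threads instead of pinning a dedicated kthread to the queue. A minimal sketch of the pattern (names my_* are hypothetical, not from this commit):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *wq;

    static void my_work_fn(struct work_struct *work)
    {
    	/* items on an ordered workqueue never run concurrently */
    }
    static DECLARE_WORK(my_work, my_work_fn);

    static int __init my_init(void)
    {
    	wq = alloc_ordered_workqueue("my_drv_wq", 0);	/* flags == 0, as above */
    	if (!wq)
    		return -ENOMEM;
    	queue_work(wq, &my_work);
    	return 0;
    }

    static void __exit my_exit(void)
    {
    	destroy_workqueue(wq);	/* flushes queued work before freeing */
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");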
@@ -34,10 +34,6 @@
 #define IWL1000_UCODE_API_MAX 5
 #define IWL100_UCODE_API_MAX 5
 
-/* Oldest version we won't warn about */
-#define IWL1000_UCODE_API_OK 5
-#define IWL100_UCODE_API_OK 5
-
 /* Lowest firmware API version supported */
 #define IWL1000_UCODE_API_MIN 1
 #define IWL100_UCODE_API_MIN 5
@@ -86,7 +82,6 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
 #define IWL_DEVICE_1000 \
 	.fw_name_pre = IWL1000_FW_PRE, \
 	.ucode_api_max = IWL1000_UCODE_API_MAX, \
-	.ucode_api_ok = IWL1000_UCODE_API_OK, \
 	.ucode_api_min = IWL1000_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_1000, \
 	.max_inst_size = IWLAGN_RTC_INST_SIZE, \
@@ -112,7 +107,6 @@ const struct iwl_cfg iwl1000_bg_cfg = {
 #define IWL_DEVICE_100 \
 	.fw_name_pre = IWL100_FW_PRE, \
 	.ucode_api_max = IWL100_UCODE_API_MAX, \
-	.ucode_api_ok = IWL100_UCODE_API_OK, \
 	.ucode_api_min = IWL100_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_100, \
 	.max_inst_size = IWLAGN_RTC_INST_SIZE, \
@@ -136,5 +130,5 @@ const struct iwl_cfg iwl100_bg_cfg = {
 	IWL_DEVICE_100,
 };
 
-MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
+MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
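The MODULE_FIRMWARE change above is the visible effect of dropping ucode_api_ok: module metadata now advertises the newest supported image rather than the merely acceptable one. As the kernel-doc later in this diff says, the requested filename is fw_name_pre<api>.ucode. A sketch of the macro pattern; the IWL1000_FW_PRE value is an assumption, since it is not shown in this hunk:

    /* Assumed definitions, following the iwl-1000.c naming convention */
    #define IWL1000_FW_PRE "iwlwifi-1000-"
    #define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"

    /* MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX))
     * therefore records "iwlwifi-1000-5.ucode" in the module metadata. */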
@@ -36,12 +36,6 @@
 #define IWL105_UCODE_API_MAX 6
 #define IWL135_UCODE_API_MAX 6
 
-/* Oldest version we won't warn about */
-#define IWL2030_UCODE_API_OK 6
-#define IWL2000_UCODE_API_OK 6
-#define IWL105_UCODE_API_OK 6
-#define IWL135_UCODE_API_OK 6
-
 /* Lowest firmware API version supported */
 #define IWL2030_UCODE_API_MIN 5
 #define IWL2000_UCODE_API_MIN 5
@@ -114,7 +108,6 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
 #define IWL_DEVICE_2000 \
 	.fw_name_pre = IWL2000_FW_PRE, \
 	.ucode_api_max = IWL2000_UCODE_API_MAX, \
-	.ucode_api_ok = IWL2000_UCODE_API_OK, \
 	.ucode_api_min = IWL2000_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_2000, \
 	.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -142,7 +135,6 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
 #define IWL_DEVICE_2030 \
 	.fw_name_pre = IWL2030_FW_PRE, \
 	.ucode_api_max = IWL2030_UCODE_API_MAX, \
-	.ucode_api_ok = IWL2030_UCODE_API_OK, \
 	.ucode_api_min = IWL2030_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_2030, \
 	.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -163,7 +155,6 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
 #define IWL_DEVICE_105 \
 	.fw_name_pre = IWL105_FW_PRE, \
 	.ucode_api_max = IWL105_UCODE_API_MAX, \
-	.ucode_api_ok = IWL105_UCODE_API_OK, \
 	.ucode_api_min = IWL105_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_105, \
 	.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -191,7 +182,6 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
 #define IWL_DEVICE_135 \
 	.fw_name_pre = IWL135_FW_PRE, \
 	.ucode_api_max = IWL135_UCODE_API_MAX, \
-	.ucode_api_ok = IWL135_UCODE_API_OK, \
 	.ucode_api_min = IWL135_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_135, \
 	.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -210,7 +200,7 @@ const struct iwl_cfg iwl135_bgn_cfg = {
 	.ht_params = &iwl2000_ht_params,
 };
 
-MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
-MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
-MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
@@ -34,10 +34,6 @@
 #define IWL5000_UCODE_API_MAX 5
 #define IWL5150_UCODE_API_MAX 2
 
-/* Oldest version we won't warn about */
-#define IWL5000_UCODE_API_OK 5
-#define IWL5150_UCODE_API_OK 2
-
 /* Lowest firmware API version supported */
 #define IWL5000_UCODE_API_MIN 1
 #define IWL5150_UCODE_API_MIN 1
@@ -84,7 +80,6 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
 #define IWL_DEVICE_5000 \
 	.fw_name_pre = IWL5000_FW_PRE, \
 	.ucode_api_max = IWL5000_UCODE_API_MAX, \
-	.ucode_api_ok = IWL5000_UCODE_API_OK, \
 	.ucode_api_min = IWL5000_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_5000, \
 	.max_inst_size = IWLAGN_RTC_INST_SIZE, \
@@ -132,7 +127,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
 	.name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
 	.fw_name_pre = IWL5000_FW_PRE,
 	.ucode_api_max = IWL5000_UCODE_API_MAX,
-	.ucode_api_ok = IWL5000_UCODE_API_OK,
 	.ucode_api_min = IWL5000_UCODE_API_MIN,
 	.device_family = IWL_DEVICE_FAMILY_5000,
 	.max_inst_size = IWLAGN_RTC_INST_SIZE,
@@ -149,7 +143,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
 #define IWL_DEVICE_5150 \
 	.fw_name_pre = IWL5150_FW_PRE, \
 	.ucode_api_max = IWL5150_UCODE_API_MAX, \
-	.ucode_api_ok = IWL5150_UCODE_API_OK, \
 	.ucode_api_min = IWL5150_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_5150, \
 	.max_inst_size = IWLAGN_RTC_INST_SIZE, \
@@ -174,5 +167,5 @@ const struct iwl_cfg iwl5150_abg_cfg = {
 	IWL_DEVICE_5150,
 };
 
-MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
+MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
@@ -36,13 +36,6 @@
 #define IWL6000G2_UCODE_API_MAX 6
 #define IWL6035_UCODE_API_MAX 6
 
-/* Oldest version we won't warn about */
-#define IWL6000_UCODE_API_OK 4
-#define IWL6000G2_UCODE_API_OK 5
-#define IWL6050_UCODE_API_OK 5
-#define IWL6000G2B_UCODE_API_OK 6
-#define IWL6035_UCODE_API_OK 6
-
 /* Lowest firmware API version supported */
 #define IWL6000_UCODE_API_MIN 4
 #define IWL6050_UCODE_API_MIN 4
@@ -136,7 +129,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
 #define IWL_DEVICE_6005 \
 	.fw_name_pre = IWL6005_FW_PRE, \
 	.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
-	.ucode_api_ok = IWL6000G2_UCODE_API_OK, \
 	.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_6005, \
 	.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -191,7 +183,6 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
 #define IWL_DEVICE_6030 \
 	.fw_name_pre = IWL6030_FW_PRE, \
 	.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
-	.ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
 	.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_6030, \
 	.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -228,7 +219,6 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
 #define IWL_DEVICE_6035 \
 	.fw_name_pre = IWL6030_FW_PRE, \
 	.ucode_api_max = IWL6035_UCODE_API_MAX, \
-	.ucode_api_ok = IWL6035_UCODE_API_OK, \
 	.ucode_api_min = IWL6035_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_6030, \
 	.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -282,7 +272,6 @@ const struct iwl_cfg iwl130_bg_cfg = {
 #define IWL_DEVICE_6000i \
 	.fw_name_pre = IWL6000_FW_PRE, \
 	.ucode_api_max = IWL6000_UCODE_API_MAX, \
-	.ucode_api_ok = IWL6000_UCODE_API_OK, \
 	.ucode_api_min = IWL6000_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_6000i, \
 	.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -370,7 +359,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
 	.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
 	.fw_name_pre = IWL6000_FW_PRE,
 	.ucode_api_max = IWL6000_UCODE_API_MAX,
-	.ucode_api_ok = IWL6000_UCODE_API_OK,
 	.ucode_api_min = IWL6000_UCODE_API_MIN,
 	.device_family = IWL_DEVICE_FAMILY_6000,
 	.max_inst_size = IWL60_RTC_INST_SIZE,
@@ -383,7 +371,7 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
 	.led_mode = IWL_LED_BLINK,
 };
 
-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX));
@@ -76,16 +76,10 @@
 #define IWL7265D_UCODE_API_MAX	21
 #define IWL3168_UCODE_API_MAX	21
 
-/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK	13
-#define IWL7265_UCODE_API_OK	13
-#define IWL7265D_UCODE_API_OK	13
-#define IWL3168_UCODE_API_OK	20
-
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN	13
-#define IWL7265_UCODE_API_MIN	13
-#define IWL7265D_UCODE_API_MIN	13
+#define IWL7260_UCODE_API_MIN	16
+#define IWL7265_UCODE_API_MIN	16
+#define IWL7265D_UCODE_API_MIN	16
 #define IWL3168_UCODE_API_MIN	20
 
 /* NVM versions */
@@ -179,25 +173,21 @@ static const struct iwl_ht_params iwl7000_ht_params = {
 #define IWL_DEVICE_7000 \
 	IWL_DEVICE_7000_COMMON, \
 	.ucode_api_max = IWL7260_UCODE_API_MAX, \
-	.ucode_api_ok = IWL7260_UCODE_API_OK, \
 	.ucode_api_min = IWL7260_UCODE_API_MIN
 
 #define IWL_DEVICE_7005 \
 	IWL_DEVICE_7000_COMMON, \
 	.ucode_api_max = IWL7265_UCODE_API_MAX, \
-	.ucode_api_ok = IWL7265_UCODE_API_OK, \
 	.ucode_api_min = IWL7265_UCODE_API_MIN
 
 #define IWL_DEVICE_3008 \
 	IWL_DEVICE_7000_COMMON, \
 	.ucode_api_max = IWL3168_UCODE_API_MAX, \
-	.ucode_api_ok = IWL3168_UCODE_API_OK, \
 	.ucode_api_min = IWL3168_UCODE_API_MIN
 
 #define IWL_DEVICE_7005D \
 	IWL_DEVICE_7000_COMMON, \
 	.ucode_api_max = IWL7265D_UCODE_API_MAX, \
-	.ucode_api_ok = IWL7265D_UCODE_API_OK, \
 	.ucode_api_min = IWL7265D_UCODE_API_MIN
 
 const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -388,8 +378,8 @@ const struct iwl_cfg iwl7265d_n_cfg = {
 	.dccm_len = IWL7265_DCCM_LEN,
 };
 
-MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK));
-MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK));
-MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));
+MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_MAX));
@@ -73,12 +73,8 @@
 #define IWL8000_UCODE_API_MAX	21
 #define IWL8265_UCODE_API_MAX	21
 
-/* Oldest version we won't warn about */
-#define IWL8000_UCODE_API_OK	13
-#define IWL8265_UCODE_API_OK	20
-
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN	13
+#define IWL8000_UCODE_API_MIN	16
 #define IWL8265_UCODE_API_MIN	20
 
 /* NVM versions */
@@ -175,19 +171,16 @@ static const struct iwl_tt_params iwl8000_tt_params = {
 #define IWL_DEVICE_8000 \
 	IWL_DEVICE_8000_COMMON, \
 	.ucode_api_max = IWL8000_UCODE_API_MAX, \
-	.ucode_api_ok = IWL8000_UCODE_API_OK, \
 	.ucode_api_min = IWL8000_UCODE_API_MIN \
 
 #define IWL_DEVICE_8260 \
 	IWL_DEVICE_8000_COMMON, \
 	.ucode_api_max = IWL8000_UCODE_API_MAX, \
-	.ucode_api_ok = IWL8000_UCODE_API_OK, \
 	.ucode_api_min = IWL8000_UCODE_API_MIN \
 
 #define IWL_DEVICE_8265 \
 	IWL_DEVICE_8000_COMMON, \
 	.ucode_api_max = IWL8265_UCODE_API_MAX, \
-	.ucode_api_ok = IWL8265_UCODE_API_OK, \
 	.ucode_api_min = IWL8265_UCODE_API_MIN \
 
 const struct iwl_cfg iwl8260_2n_cfg = {
@@ -259,5 +252,5 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
 	.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
 };
 
-MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_OK));
+MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX));
@@ -5,7 +5,7 @@
 *
 * GPL LICENSE SUMMARY
 *
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
 *
 * BSD LICENSE
 *
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -57,11 +57,8 @@
 /* Highest firmware API version supported */
 #define IWL9000_UCODE_API_MAX	21
 
-/* Oldest version we won't warn about */
-#define IWL9000_UCODE_API_OK	13
-
 /* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN	13
+#define IWL9000_UCODE_API_MIN	16
 
 /* NVM versions */
 #define IWL9000_NVM_VERSION	0x0a1d
@@ -122,7 +119,6 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 #define IWL_DEVICE_9000 \
 	.ucode_api_max = IWL9000_UCODE_API_MAX, \
-	.ucode_api_ok = IWL9000_UCODE_API_OK, \
 	.ucode_api_min = IWL9000_UCODE_API_MIN, \
 	.device_family = IWL_DEVICE_FAMILY_8000, \
 	.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -137,14 +133,15 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 	.dccm2_len = IWL9000_DCCM2_LEN, \
 	.smem_offset = IWL9000_SMEM_OFFSET, \
 	.smem_len = IWL9000_SMEM_LEN, \
+	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
 	.thermal_params = &iwl9000_tt_params, \
 	.apmg_not_supported = true, \
 	.mq_rx_supported = true, \
 	.vht_mu_mimo_supported = true, \
 	.mac_addr_from_csr = true
 
-const struct iwl_cfg iwl9260_2ac_cfg = {
-	.name = "Intel(R) Dual Band Wireless AC 9260",
+const struct iwl_cfg iwl9560_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9560",
 	.fw_name_pre = IWL9000_FW_PRE,
 	IWL_DEVICE_9000,
 	.ht_params = &iwl9000_ht_params,
@@ -163,4 +160,4 @@ const struct iwl_cfg iwl5165_2ac_cfg = {
 	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
 };
 
-MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
@@ -131,6 +131,8 @@ enum iwl_led_mode {
 #define IWL_MAX_WD_TIMEOUT	120000
 
 #define IWL_DEFAULT_MAX_TX_POWER 22
+#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
+				 NETIF_F_TSO | NETIF_F_TSO6)
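For context, not part of this commit: these NETIF_F_* bits are standard netdev feature flags (TX IPv4/IPv6 checksum offload plus TSO/TSO6), and the 9000-series hunk above combines them with NETIF_F_RXCSUM. A hedged sketch of how such a cfg-provided mask typically reaches the stack; iwlwifi is believed to route it through mac80211's hw->netdev_features, so the direct net_device helper below is purely illustrative:

    #include <linux/netdevice.h>

    /* Hypothetical helper: expose a config-provided feature mask */
    static void example_apply_features(struct net_device *dev,
    				   netdev_features_t cfg_features)
    {
    	dev->hw_features |= cfg_features;	/* user-togglable via ethtool */
    	dev->features |= cfg_features;		/* enabled by default */
    }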
 
 /* Antenna presence definitions */
 #define	ANT_NONE	0x0
@@ -277,8 +279,6 @@ struct iwl_pwr_tx_backoff {
 *	(.ucode) will be added to filename before loading from disk. The
 *	filename is constructed as fw_name_pre<api>.ucode.
 * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_ok: oldest version of the uCode API that is OK to load
- *	without a warning, for use in transitions
 * @ucode_api_min: Lowest version of uCode API supported by driver.
 * @max_inst_size: The maximal length of the fw inst section
 * @max_data_size: The maximal length of the fw data section
@@ -324,7 +324,6 @@ struct iwl_cfg {
 	const char *name;
 	const char *fw_name_pre;
 	const unsigned int ucode_api_max;
-	const unsigned int ucode_api_ok;
 	const unsigned int ucode_api_min;
 	const enum iwl_device_family device_family;
 	const u32 max_data_size;
@@ -439,7 +438,7 @@ extern const struct iwl_cfg iwl8265_2ac_cfg;
 extern const struct iwl_cfg iwl4165_2ac_cfg;
 extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
-extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_cfg;
 extern const struct iwl_cfg iwl5165_2ac_cfg;
 #endif /* CONFIG_IWLMVM */
...
@@ -179,6 +179,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
 		kfree(drv->fw.dbg_conf_tlv[i]);
 	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
 		kfree(drv->fw.dbg_trigger_tlv[i]);
+	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++)
+		kfree(drv->fw.dbg_mem_tlv[i]);
 
 	for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
 		iwl_free_fw_img(drv, drv->fw.img + i);
@@ -297,6 +299,7 @@ struct iwl_firmware_pieces {
 	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
 };
 
 /*
/* /*
...@@ -1041,6 +1044,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, ...@@ -1041,6 +1044,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len); iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
gscan_capa = true; gscan_capa = true;
break; break;
case IWL_UCODE_TLV_FW_MEM_SEG: {
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
(void *)tlv_data;
u32 type;
if (tlv_len != (sizeof(*dbg_mem)))
goto invalid_tlv_len;
type = le32_to_cpu(dbg_mem->data_type);
drv->fw.dbg_dynamic_mem = true;
if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) {
IWL_ERR(drv,
"Skip unknown dbg mem segment: %u\n",
dbg_mem->data_type);
break;
}
if (pieces->dbg_mem_tlv[type]) {
IWL_ERR(drv,
"Ignore duplicate mem segment: %u\n",
dbg_mem->data_type);
break;
}
IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
dbg_mem->data_type);
pieces->dbg_mem_tlv[type] = dbg_mem;
break;
}
 		default:
 			IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
 			break;
@@ -1060,11 +1094,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 		return -EINVAL;
 	}
-	if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
-		 !gscan_capa,
-		 "GSCAN is supported but capabilities TLV is unavailable\n"))
+	/*
+	 * If ucode advertises that it supports GSCAN but GSCAN
+	 * capabilities TLV is not present, or if it has an old format,
+	 * warn and continue without GSCAN.
+	 */
+	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+	    !gscan_capa) {
+		IWL_DEBUG_INFO(drv,
+			       "GSCAN is supported but capabilities TLV is unavailable\n");
 		__clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
 			    capa->_capa);
+	}
 
 	return 0;
@@ -1199,7 +1240,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 	int err;
 	struct iwl_firmware_pieces *pieces;
 	const unsigned int api_max = drv->cfg->ucode_api_max;
-	unsigned int api_ok = drv->cfg->ucode_api_ok;
 	const unsigned int api_min = drv->cfg->ucode_api_min;
 	size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
 	u32 api_ver;
@@ -1212,20 +1252,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 		IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
 	fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
 
-	if (!api_ok)
-		api_ok = api_max;
-
 	pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
 	if (!pieces)
 		return;
 
-	if (!ucode_raw) {
-		if (drv->fw_index <= api_ok)
-			IWL_ERR(drv,
-				"request for firmware file '%s' failed.\n",
-				drv->firmware_name);
+	if (!ucode_raw)
 		goto try_again;
-	}
 	IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
 		       drv->firmware_name, ucode_raw->size);
@@ -1248,10 +1280,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 	if (err)
 		goto try_again;
 
-	if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
-		api_ver = drv->fw.ucode_ver;
-	else
-		api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
+	api_ver = drv->fw.ucode_ver;
 
 	/*
 	 * api_ver should match the api version forming part of the
@@ -1267,19 +1296,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 			api_max, api_ver);
 			goto try_again;
 		}
-
-		if (api_ver < api_ok) {
-			if (api_ok != api_max)
-				IWL_ERR(drv, "Firmware has old API version, "
-					"expected v%u through v%u, got v%u.\n",
-					api_ok, api_max, api_ver);
-			else
-				IWL_ERR(drv, "Firmware has old API version, "
-					"expected v%u, got v%u.\n",
-					api_max, api_ver);
-			IWL_ERR(drv, "New firmware can be obtained from "
-				"http://www.intellinuxwireless.org/.\n");
-		}
 	}
 
 	/*
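With ucode_api_ok gone, the only load policy left is "newest first, step down on failure": the driver requests the api_max image and walks toward api_min before giving up, so there is nothing left to warn about. The retry actually lives in the asynchronous try_again path via drv->fw_index, which this diff does not show; the loop below is an illustrative reconstruction, not the driver's code:

    #include <linux/firmware.h>

    /* Illustrative only -- iwl_request_firmware() does this asynchronously */
    static int example_load_newest(struct device *dev, const char *fw_name_pre,
    			       unsigned int api_max, unsigned int api_min,
    			       const struct firmware **fw)
    {
    	char name[64];
    	unsigned int api;

    	for (api = api_max; api >= api_min; api--) {
    		snprintf(name, sizeof(name), "%s%u.ucode", fw_name_pre, api);
    		if (!request_firmware(fw, name, dev))
    			return 0;	/* newest available image wins */
    	}
    	return -ENOENT;
    }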
@@ -1368,6 +1384,17 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 		}
 	}
 
+	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) {
+		if (pieces->dbg_mem_tlv[i]) {
+			drv->fw.dbg_mem_tlv[i] =
+				kmemdup(pieces->dbg_mem_tlv[i],
+					sizeof(*drv->fw.dbg_mem_tlv[i]),
+					GFP_KERNEL);
+			if (!drv->fw.dbg_mem_tlv[i])
+				goto out_free_fw;
+		}
+	}
+
 	/* Now that we can no longer fail, copy information */
 
 	/*
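The kmemdup() in the hunk above matters because pieces->dbg_mem_tlv[] points into ucode_raw, which is released once parsing finishes; only a deep copy may live on in drv->fw. A reduced sketch of the pattern (the function name is hypothetical):

    #include <linux/slab.h>

    /* the returned copy outlives the firmware blob the TLV was parsed from */
    static struct iwl_fw_dbg_mem_seg_tlv *
    example_keep_copy(const struct iwl_fw_dbg_mem_seg_tlv *transient)
    {
    	return kmemdup(transient, sizeof(*transient), GFP_KERNEL);
    }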
@@ -1560,9 +1587,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
 	.power_level = IWL_POWER_INDEX_1,
 	.d0i3_disable = true,
 	.d0i3_entry_delay = 1000,
-#ifndef CONFIG_IWLWIFI_UAPSD
-	.uapsd_disable = true,
-#endif /* CONFIG_IWLWIFI_UAPSD */
+	.uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
 	/* the rest are 0 by default */
 };
 IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1681,12 +1706,9 @@ module_param_named(lar_disable, iwlwifi_mod_params.lar_disable,
 MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
 
 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
-		   bool, S_IRUGO | S_IWUSR);
-#ifdef CONFIG_IWLWIFI_UAPSD
-MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
-#else
-MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)");
-#endif
+		   uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(uapsd_disable,
+		 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
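Taken together with the Kconfig hunk at the top of this diff, U-APSD policy moves from a build-time bool to a runtime bitmap: bit 0 disables it on BSS (station) interfaces, bit 1 on P2P clients, and the default of 3 disables both. Loading with, say, uapsd_disable=2 re-enables U-APSD for the BSS while keeping it off for P2P clients. A sketch of how driver code would test the bits; this mirrors the enum added later in this diff, not any specific function shown here:

    /* Hedged sketch: gate U-APSD per interface type on the new bitmap */
    static bool example_uapsd_allowed(struct ieee80211_vif *vif)
    {
    	u32 mask = vif->p2p ? IWL_DISABLE_UAPSD_P2P_CLIENT
    			    : IWL_DISABLE_UAPSD_BSS;

    	return !(iwlwifi_mod_params.uapsd_disable & mask);
    }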
 /*
 * set bt_coex_active to true, uCode will do kill/defer
...
@@ -105,6 +105,7 @@ enum iwl_fw_error_dump_type {
 	IWL_FW_ERROR_DUMP_RB = 11,
 	IWL_FW_ERROR_DUMP_PAGING = 12,
 	IWL_FW_ERROR_DUMP_RADIO_REG = 13,
+	IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
 	IWL_FW_ERROR_DUMP_MAX,
 };
...
@@ -142,6 +142,7 @@ enum iwl_ucode_tlv_type {
 	IWL_UCODE_TLV_FW_DBG_CONF	= 39,
 	IWL_UCODE_TLV_FW_DBG_TRIGGER	= 40,
 	IWL_UCODE_TLV_FW_GSCAN_CAPA	= 50,
+	IWL_UCODE_TLV_FW_MEM_SEG	= 51,
 };
 
 struct iwl_ucode_tlv {
@@ -245,13 +246,11 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
 
 /**
 * enum iwl_ucode_tlv_api - ucode api
- * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
 *	longer than the passive one, which is essential for fragmented scan.
 * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
 * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
 * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
- * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
 * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
 *	instead of 3.
 * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
@@ -260,12 +259,10 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
 * @NUM_IWL_UCODE_TLV_API: number of bits used
 */
 enum iwl_ucode_tlv_api {
-	IWL_UCODE_TLV_API_BT_COEX_SPLIT		= (__force iwl_ucode_tlv_api_t)3,
 	IWL_UCODE_TLV_API_FRAGMENTED_SCAN	= (__force iwl_ucode_tlv_api_t)8,
 	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE	= (__force iwl_ucode_tlv_api_t)9,
 	IWL_UCODE_TLV_API_WIDE_CMD_HDR		= (__force iwl_ucode_tlv_api_t)14,
 	IWL_UCODE_TLV_API_LQ_SS_PARAMS		= (__force iwl_ucode_tlv_api_t)18,
-	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
 	IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY	= (__force iwl_ucode_tlv_api_t)24,
 	IWL_UCODE_TLV_API_TX_POWER_CHAIN	= (__force iwl_ucode_tlv_api_t)27,
@@ -324,6 +321,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
 * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
 * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
 *	regular image.
+ * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
+ *	memory addresses from the firmware.
+ * @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
 *
 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
 */
@@ -361,6 +361,8 @@ enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT	= (__force iwl_ucode_tlv_capa_t)75,
 	IWL_UCODE_TLV_CAPA_CTDP_SUPPORT			= (__force iwl_ucode_tlv_capa_t)76,
 	IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED		= (__force iwl_ucode_tlv_capa_t)77,
+	IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG	= (__force iwl_ucode_tlv_capa_t)80,
+	IWL_UCODE_TLV_CAPA_LQM_SUPPORT			= (__force iwl_ucode_tlv_capa_t)81,
 
 	NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__
@@ -490,6 +492,37 @@ enum iwl_fw_dbg_monitor_mode {
 	MIPI_MODE = 3,
 };
 
+/**
+ * enum iwl_fw_mem_seg_type - data types for dumping on error
+ *
+ * @FW_DBG_MEM_SMEM: the data type is SMEM
+ * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC
+ * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC
+ */
+enum iwl_fw_dbg_mem_seg_type {
+	FW_DBG_MEM_DCCM_LMAC = 0,
+	FW_DBG_MEM_DCCM_UMAC,
+	FW_DBG_MEM_SMEM,
+
+	/* Must be last */
+	FW_DBG_MEM_MAX,
+};
+
+/**
+ * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
+ *
+ * @data_type: enum %iwl_fw_mem_seg_type
+ * @ofs: the memory segment offset
+ * @len: the memory segment length, in bytes
+ *
+ * This parses IWL_UCODE_TLV_FW_MEM_SEG
+ */
+struct iwl_fw_dbg_mem_seg_tlv {
+	__le32 data_type;
+	__le32 ofs;
+	__le32 len;
+} __packed;
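All three fields are little-endian on the wire, which is why the TLV parser earlier in this diff goes through le32_to_cpu() before using them. A minimal consumer, assuming only the struct just defined (the function name is hypothetical):

    #include <linux/printk.h>

    static void example_print_mem_seg(const struct iwl_fw_dbg_mem_seg_tlv *tlv)
    {
    	pr_info("dbg mem seg: type %u, offset 0x%x, %u bytes\n",
    		le32_to_cpu(tlv->data_type),
    		le32_to_cpu(tlv->ofs),
    		le32_to_cpu(tlv->len));
    }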
 /**
 * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
 *
...
@@ -286,6 +286,8 @@ struct iwl_fw {
 	struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
 	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
+	bool dbg_dynamic_mem;
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
 	u8 dbg_dest_reg_num;
 	struct iwl_gscan_capabilities gscan_capa;
...
@@ -92,6 +92,11 @@ enum iwl_amsdu_size {
 	IWL_AMSDU_12K = 2,
 };
 
+enum iwl_uapsd_disable {
+	IWL_DISABLE_UAPSD_BSS		= BIT(0),
+	IWL_DISABLE_UAPSD_P2P_CLIENT	= BIT(1),
+};
+
 /**
 * struct iwl_mod_params
 *
@@ -109,7 +114,8 @@ enum iwl_amsdu_size {
 * @debug_level: levels are IWL_DL_*
 * @ant_coupling: antenna coupling in dB, default = 0
 * @nvm_file: specifies a external NVM file
- * @uapsd_disable: disable U-APSD, default = 1
+ * @uapsd_disable: disable U-APSD, see %enum iwl_uapsd_disable, default =
+ *	IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
 * @d0i3_disable: disable d0i3, default = 1,
 * @d0i3_entry_delay: time to wait after no refs are taken before
 *	entering D0i3 (in msecs)
@@ -131,7 +137,7 @@ struct iwl_mod_params {
 #endif
 	int ant_coupling;
 	char *nvm_file;
-	bool uapsd_disable;
+	u32 uapsd_disable;
 	bool d0i3_disable;
 	unsigned int d0i3_entry_delay;
 	bool lar_disable;
...
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -345,6 +347,16 @@ enum secure_load_status_reg {
 #define TXF_READ_MODIFY_DATA		(0xa00448)
 #define TXF_READ_MODIFY_ADDR		(0xa0044c)
 
+/* UMAC Internal Tx Fifo */
+#define TXF_CPU2_FIFO_ITEM_CNT		(0xA00538)
+#define TXF_CPU2_WR_PTR			(0xA00514)
+#define TXF_CPU2_RD_PTR			(0xA00510)
+#define TXF_CPU2_FENCE_PTR		(0xA00518)
+#define TXF_CPU2_LOCK_FENCE		(0xA00524)
+#define TXF_CPU2_NUM			(0xA0053C)
+#define TXF_CPU2_READ_MODIFY_DATA	(0xA00548)
+#define TXF_CPU2_READ_MODIFY_ADDR	(0xA0054C)
+
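These UMAC-internal TX FIFO registers pair with the new IWL_FW_ERROR_DUMP_INTERNAL_TXF dump type added earlier in this diff. A hedged sketch of how a dump path might sample them over the periphery bus; iwl_read_prph() is the usual accessor in this driver, but the actual dump routine is not part of the hunks shown:

    /* Sketch: sample the UMAC internal TXF state for an error dump */
    static void example_read_cpu2_txf(struct iwl_trans *trans)
    {
    	u32 items = iwl_read_prph(trans, TXF_CPU2_FIFO_ITEM_CNT);
    	u32 wr = iwl_read_prph(trans, TXF_CPU2_WR_PTR);
    	u32 rd = iwl_read_prph(trans, TXF_CPU2_RD_PTR);

    	pr_info("CPU2 TXF: %u items, wr=0x%x rd=0x%x\n", items, wr, rd);
    }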
 /* Radio registers access */
 #define RSP_RADIO_CMD			(0xa02804)
 #define RSP_RADIO_RDDAT			(0xa02814)
...
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -519,7 +521,7 @@ struct iwl_trans;
 struct iwl_trans_txq_scd_cfg {
 	u8 fifo;
-	s8 sta_id;
+	u8 sta_id;
 	u8 tid;
 	bool aggregate;
 	int frame_limit;
...
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
 iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o coex.o coex_legacy.o
+iwlmvm-y += power.o coex.o
 iwlmvm-y += tt.o offloading.o tdls.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
 iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
...
@@ -411,9 +411,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 	struct iwl_bt_coex_cmd bt_cmd = {};
 	u32 mode;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_send_bt_init_conf_old(mvm);
-
 	lockdep_assert_held(&mvm->mutex);
 
 	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -728,12 +725,6 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa,
-			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-		iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
-		return;
-	}
-
 	IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
 	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
 	IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
@@ -755,12 +746,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	int ret;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa,
-			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-		iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
-		return;
-	}
-
 	lockdep_assert_held(&mvm->mutex);
 
 	/* Ignore updates if we are in force mode */
@@ -807,9 +792,6 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
 	struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
 	enum iwl_bt_coex_lut_type lut_type;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
-
 	if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
 		return LINK_QUAL_AGG_TIME_LIMIT_DEF;
 
@@ -834,9 +816,6 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
 	struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
 	enum iwl_bt_coex_lut_type lut_type;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
-
 	if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
 		return true;
 
@@ -864,9 +843,6 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
 	if (ant & mvm->cfg->non_shared_ant)
 		return true;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
-
 	return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
 		BT_HIGH_TRAFFIC;
 }
@@ -877,9 +853,6 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
 	if (mvm->cfg->bt_shared_single_ant)
 		return true;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
-
 	return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
 }
@@ -888,9 +861,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
 {
 	u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
-
 	if (band != IEEE80211_BAND_2GHZ)
 		return false;
 
@@ -937,12 +907,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
 {
-	if (!fw_has_api(&mvm->fw->ucode_capa,
-			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-		iwl_mvm_bt_coex_vif_change_old(mvm);
-		return;
-	}
-
 	iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
@@ -955,12 +919,6 @@ void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
 	u8 __maybe_unused lower_bound, upper_bound;
 	u8 lut;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa,
-			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-		iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
-		return;
-	}
-
 	if (!iwl_mvm_bt_is_plcr_supported(mvm))
 		return;
...
@@ -75,7 +75,6 @@
 #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT	(10 * USEC_PER_MSEC)
 #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT	(2 * 1024) /* defined in TU */
 #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT	(40 * 1024) /* defined in TU */
-#define IWL_MVM_P2P_UAPSD_STANDALONE		0
 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE	0
 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT		(50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT		(50 * USEC_PER_MSEC)
...
@@ -723,7 +723,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		return -EIO;
 	}
 
-	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
+	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
 	if (ret)
 		return ret;
 	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
...
...@@ -1425,6 +1425,89 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file, ...@@ -1425,6 +1425,89 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len); return simple_read_from_buffer(user_buf, count, ppos, buf, len);
} }
static const char * const chanwidths[] = {
[NL80211_CHAN_WIDTH_20_NOHT] = "noht",
[NL80211_CHAN_WIDTH_20] = "ht20",
[NL80211_CHAN_WIDTH_40] = "ht40",
[NL80211_CHAN_WIDTH_80] = "vht80",
[NL80211_CHAN_WIDTH_80P80] = "vht80p80",
[NL80211_CHAN_WIDTH_160] = "vht160",
};
static bool iwl_mvm_lqm_notif_wait(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
struct ieee80211_vif *vif = data;
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_link_qual_msrmnt_notif *report = (void *)pkt->data;
u32 num_of_stations = le32_to_cpu(report->number_of_stations);
int i;
IWL_INFO(mvm, "LQM report:\n");
IWL_INFO(mvm, "\tstatus: %d\n", report->status);
IWL_INFO(mvm, "\tmacID: %d\n", le32_to_cpu(report->mac_id));
IWL_INFO(mvm, "\ttx_frame_dropped: %d\n",
le32_to_cpu(report->tx_frame_dropped));
IWL_INFO(mvm, "\ttime_in_measurement_window: %d us\n",
le32_to_cpu(report->time_in_measurement_window));
IWL_INFO(mvm, "\ttotal_air_time_other_stations: %d\n",
le32_to_cpu(report->total_air_time_other_stations));
IWL_INFO(mvm, "\tchannel_freq: %d\n",
vif->bss_conf.chandef.center_freq1);
IWL_INFO(mvm, "\tchannel_width: %s\n",
chanwidths[vif->bss_conf.chandef.width]);
IWL_INFO(mvm, "\tnumber_of_stations: %d\n", num_of_stations);
for (i = 0; i < num_of_stations; i++)
IWL_INFO(mvm, "\t\tsta[%d]: %d\n", i,
report->frequent_stations_air_time[i]);
return true;
}
static ssize_t iwl_dbgfs_lqm_send_cmd_write(struct ieee80211_vif *vif,
char *buf, size_t count,
loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
struct iwl_notification_wait wait_lqm_notif;
static u16 lqm_notif[] = {
WIDE_ID(MAC_CONF_GROUP,
LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF)
};
int err;
u32 duration;
u32 timeout;
if (sscanf(buf, "%d,%d", &duration, &timeout) != 2)
return -EINVAL;
iwl_init_notification_wait(&mvm->notif_wait, &wait_lqm_notif,
lqm_notif, ARRAY_SIZE(lqm_notif),
iwl_mvm_lqm_notif_wait, vif);
mutex_lock(&mvm->mutex);
err = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT,
duration, timeout);
mutex_unlock(&mvm->mutex);
if (err) {
IWL_ERR(mvm, "Failed to send lqm cmdf(err=%d)\n", err);
iwl_remove_notification(&mvm->notif_wait, &wait_lqm_notif);
return err;
}
/* wait for 2 * timeout (safety guard) and convert to jiffies*/
timeout = msecs_to_jiffies((timeout * 2) / 1000);
err = iwl_wait_notification(&mvm->notif_wait, &wait_lqm_notif,
timeout);
if (err)
IWL_ERR(mvm, "Getting lqm notif timed out\n");
return count;
}
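Usage note (an assumption based on the parsing above, not documented in the patch itself): the lqm_send_cmd write handler expects two comma-separated decimal values, duration then timeout, both in usec. For example, writing "500000,1000000" to the per-vif lqm_send_cmd debugfs file would start a 0.5 s measurement with a 1 s response timeout, and the handler then waits up to twice that timeout for the completion notification; the exact debugfs path depends on the platform and wiphy name.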
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
...@@ -1449,6 +1532,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); ...@@ -1449,6 +1532,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{ {
...@@ -1488,6 +1572,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) ...@@ -1488,6 +1572,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
S_IRUSR | S_IWUSR); S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR); S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif) mvmvif == mvm->bf_allowed_vif)
......
...@@ -65,6 +65,7 @@ ...@@ -65,6 +65,7 @@
*****************************************************************************/ *****************************************************************************/
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/ieee80211.h> #include <linux/ieee80211.h>
#include <linux/netdevice.h>
#include "mvm.h" #include "mvm.h"
#include "fw-dbg.h" #include "fw-dbg.h"
...@@ -463,69 +464,11 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, ...@@ -463,69 +464,11 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
return pos; return pos;
} }
static
int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif,
char *buf, int pos, int bufsz)
{
pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
BT_MBOX_PRINT(0, LE_PROF1, false);
BT_MBOX_PRINT(0, LE_PROF2, false);
BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
BT_MBOX_PRINT(0, CHL_SEQ_N, false);
BT_MBOX_PRINT(0, INBAND_S, false);
BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
BT_MBOX_PRINT(0, LE_SCAN, false);
BT_MBOX_PRINT(0, LE_ADV, false);
BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
BT_MBOX_PRINT(0, OPEN_CON_1, true);
pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
BT_MBOX_PRINT(1, IP_SR, false);
BT_MBOX_PRINT(1, LE_MSTR, false);
BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
BT_MBOX_PRINT(1, MSG_TYPE, false);
BT_MBOX_PRINT(1, SSN, true);
pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
BT_MBOX_PRINT(2, SNIFF_ACT, false);
BT_MBOX_PRINT(2, PAG, false);
BT_MBOX_PRINT(2, INQUIRY, false);
BT_MBOX_PRINT(2, CONN, false);
BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
BT_MBOX_PRINT(2, DISC, false);
BT_MBOX_PRINT(2, SCO_TX_ACT, false);
BT_MBOX_PRINT(2, SCO_RX_ACT, false);
BT_MBOX_PRINT(2, ESCO_RE_TX, false);
BT_MBOX_PRINT(2, SCO_DURATION, true);
pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
BT_MBOX_PRINT(3, SCO_STATE, false);
BT_MBOX_PRINT(3, SNIFF_STATE, false);
BT_MBOX_PRINT(3, A2DP_STATE, false);
BT_MBOX_PRINT(3, ACL_STATE, false);
BT_MBOX_PRINT(3, MSTR_STATE, false);
BT_MBOX_PRINT(3, OBX_STATE, false);
BT_MBOX_PRINT(3, OPEN_CON_2, false);
BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
BT_MBOX_PRINT(3, INBAND_P, false);
BT_MBOX_PRINT(3, MSG_TYPE_2, false);
BT_MBOX_PRINT(3, SSN_2, false);
BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
return pos;
}
static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct iwl_mvm *mvm = file->private_data; struct iwl_mvm *mvm = file->private_data;
struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
char *buf; char *buf;
int ret, pos = 0, bufsz = sizeof(char) * 1024; int ret, pos = 0, bufsz = sizeof(char) * 1024;
...@@ -535,52 +478,24 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, ...@@ -535,52 +478,24 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
if (!fw_has_api(&mvm->fw->ucode_capa, pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
struct iwl_bt_coex_profile_notif_old *notif = pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
&mvm->last_bt_notif_old; notif->bt_ci_compliance);
pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz); le32_to_cpu(notif->primary_ch_lut));
pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n", le32_to_cpu(notif->secondary_ch_lut));
notif->bt_ci_compliance); pos += scnprintf(buf + pos,
pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n", bufsz - pos, "bt_activity_grading = %d\n",
le32_to_cpu(notif->primary_ch_lut)); le32_to_cpu(notif->bt_activity_grading));
pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n", pos += scnprintf(buf + pos, bufsz - pos,
le32_to_cpu(notif->secondary_ch_lut)); "antenna isolation = %d CORUN LUT index = %d\n",
pos += scnprintf(buf+pos, mvm->last_ant_isol, mvm->last_corun_lut);
bufsz-pos, "bt_activity_grading = %d\n", pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
le32_to_cpu(notif->bt_activity_grading)); (notif->ttc_rrc_status >> 4) & 0xF);
pos += scnprintf(buf+pos, bufsz-pos, pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
"antenna isolation = %d CORUN LUT index = %d\n", notif->ttc_rrc_status & 0xF);
mvm->last_ant_isol, mvm->last_corun_lut);
pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
notif->rrc_enabled);
pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
notif->ttc_enabled);
} else {
struct iwl_bt_coex_profile_notif *notif =
&mvm->last_bt_notif;
pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
notif->bt_ci_compliance);
pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
le32_to_cpu(notif->primary_ch_lut));
pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
le32_to_cpu(notif->secondary_ch_lut));
pos += scnprintf(buf+pos,
bufsz-pos, "bt_activity_grading = %d\n",
le32_to_cpu(notif->bt_activity_grading));
pos += scnprintf(buf+pos, bufsz-pos,
"antenna isolation = %d CORUN LUT index = %d\n",
mvm->last_ant_isol, mvm->last_corun_lut);
pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
(notif->ttc_rrc_status >> 4) & 0xF);
pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
notif->ttc_rrc_status & 0xF);
}
pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
IWL_MVM_BT_COEX_SYNC2SCO); IWL_MVM_BT_COEX_SYNC2SCO);
...@@ -602,44 +517,20 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, ...@@ -602,44 +517,20 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct iwl_mvm *mvm = file->private_data; struct iwl_mvm *mvm = file->private_data;
struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
char buf[256]; char buf[256];
int bufsz = sizeof(buf); int bufsz = sizeof(buf);
int pos = 0; int pos = 0;
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
if (!fw_has_api(&mvm->fw->ucode_capa, pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { pos += scnprintf(buf + pos, bufsz - pos,
struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old; "\tPrimary Channel Bitmap 0x%016llx\n",
le64_to_cpu(cmd->bt_primary_ci));
pos += scnprintf(buf+pos, bufsz-pos, pos += scnprintf(buf + pos, bufsz - pos,
"Channel inhibition CMD\n"); "\tSecondary Channel Bitmap 0x%016llx\n",
pos += scnprintf(buf+pos, bufsz-pos, le64_to_cpu(cmd->bt_secondary_ci));
"\tPrimary Channel Bitmap 0x%016llx\n",
le64_to_cpu(cmd->bt_primary_ci));
pos += scnprintf(buf+pos, bufsz-pos,
"\tSecondary Channel Bitmap 0x%016llx\n",
le64_to_cpu(cmd->bt_secondary_ci));
pos += scnprintf(buf+pos, bufsz-pos,
"BT Configuration CMD - 0=default, 1=never, 2=always\n");
pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
mvm->bt_ack_kill_msk[0]);
pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
mvm->bt_cts_kill_msk[0]);
} else {
struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
pos += scnprintf(buf+pos, bufsz-pos,
"Channel inhibition CMD\n");
pos += scnprintf(buf+pos, bufsz-pos,
"\tPrimary Channel Bitmap 0x%016llx\n",
le64_to_cpu(cmd->bt_primary_ci));
pos += scnprintf(buf+pos, bufsz-pos,
"\tSecondary Channel Bitmap 0x%016llx\n",
le64_to_cpu(cmd->bt_secondary_ci));
}
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
...@@ -990,8 +881,10 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, ...@@ -990,8 +881,10 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
struct iwl_rss_config_cmd cmd = { struct iwl_rss_config_cmd cmd = {
.flags = cpu_to_le32(IWL_RSS_ENABLE), .flags = cpu_to_le32(IWL_RSS_ENABLE),
.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
IWL_RSS_HASH_TYPE_IPV4_UDP |
IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
IWL_RSS_HASH_TYPE_IPV6_TCP | IWL_RSS_HASH_TYPE_IPV6_TCP |
IWL_RSS_HASH_TYPE_IPV6_UDP |
IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
}; };
int ret, i, num_repeats, nbytes = count / 2; int ret, i, num_repeats, nbytes = count / 2;
...@@ -1015,7 +908,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, ...@@ -1015,7 +908,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
ARRAY_SIZE(cmd.indirection_table) % nbytes); ARRAY_SIZE(cmd.indirection_table) % nbytes);
memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key)); netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
......
...@@ -268,12 +268,25 @@ enum iwl_rx_mpdu_amsdu_info { ...@@ -268,12 +268,25 @@ enum iwl_rx_mpdu_amsdu_info {
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80, IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
}; };
enum iwl_rx_l3_proto_values {
IWL_RX_L3_TYPE_NONE,
IWL_RX_L3_TYPE_IPV4,
IWL_RX_L3_TYPE_IPV4_FRAG,
IWL_RX_L3_TYPE_IPV6_FRAG,
IWL_RX_L3_TYPE_IPV6,
IWL_RX_L3_TYPE_IPV6_IN_IPV4,
IWL_RX_L3_TYPE_ARP,
IWL_RX_L3_TYPE_EAPOL,
};
#define IWL_RX_L3_PROTO_POS 4
enum iwl_rx_l3l4_flags { enum iwl_rx_l3l4_flags {
IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0), IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0),
IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1), IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1),
IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2), IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2),
IWL_RX_L3L4_TCP_ACK = BIT(3), IWL_RX_L3L4_TCP_ACK = BIT(3),
IWL_RX_L3L4_L3_PROTO_MASK = 0xf << 4, IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS,
IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8, IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8,
IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12, IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12,
}; };
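A minimal sketch, assuming only the definitions above, of how a consumer of the RX metadata might decode the L3 protocol field; the helper name is hypothetical, not part of the driver:

	/* Hypothetical helper: extract the L3 protocol type from the RX
	 * l3l4_flags word, using the mask and bit position defined above. */
	static inline enum iwl_rx_l3_proto_values iwl_rx_l3_proto(u32 l3l4_flags)
	{
		return (l3l4_flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
			IWL_RX_L3_PROTO_POS;
	}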
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY * GPL LICENSE SUMMARY
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
...@@ -193,11 +194,41 @@ enum iwl_tx_pm_timeouts { ...@@ -193,11 +194,41 @@ enum iwl_tx_pm_timeouts {
#define IWL_BAR_DFAULT_RETRY_LIMIT 60 #define IWL_BAR_DFAULT_RETRY_LIMIT 60
#define IWL_LOW_RETRY_LIMIT 7 #define IWL_LOW_RETRY_LIMIT 7
/**
* enum iwl_tx_offload_assist_flags_pos - bit positions for the %iwl_tx_cmd
* offload_assist field
* @TX_CMD_OFFLD_IP_HDR: offset to the start of the IP header (in words)
* from the end of the MAC header. In the normal case this is 4 words,
* for the SNAP header.
* Note: the TX command, MAC header and pad are not counted in the offset.
* This is used to help the offload in case there is tunneling, such as
* IPv6 in IPv4; in that case the IP header offset should point to the
* inner IP header, and the IPv4 checksum of the outer header should be
* calculated by the driver.
* @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum
* @TX_CMD_OFFLD_L3_EN: enable IP header checksum
* @TX_CMD_OFFLD_MH_SIZE: size of the MAC header in words. Includes the IV
* field. Doesn't include the pad.
* @TX_CMD_OFFLD_PAD: mark that a 2-byte pad was inserted after the MAC
* header, for alignment
* @TX_CMD_OFFLD_AMSDU: mark that the TX command is for an A-MSDU
*/
enum iwl_tx_offload_assist_flags_pos {
TX_CMD_OFFLD_IP_HDR = 0,
TX_CMD_OFFLD_L4_EN = 6,
TX_CMD_OFFLD_L3_EN = 7,
TX_CMD_OFFLD_MH_SIZE = 8,
TX_CMD_OFFLD_PAD = 13,
TX_CMD_OFFLD_AMSDU = 14,
};
#define IWL_TX_CMD_OFFLD_MH_MASK 0x1f
#define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f
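A minimal sketch, under the positions and masks above, of how the 16-bit offload_assist word might be composed for a frame with L3/L4 checksum offload enabled; the helper and its parameters are illustrative assumptions, not the driver's actual TX path:

	/* Illustrative only: compose offload_assist from the bit positions
	 * above. mh_words is the MAC header size in 16-bit words (IV
	 * included, pad excluded); ip_hdr_words is the IP header offset in
	 * words from the end of the MAC header (normally 4, for SNAP). */
	static inline __le16 build_offload_assist(u16 mh_words, u16 ip_hdr_words,
						  bool pad_inserted)
	{
		u16 oa = (ip_hdr_words & IWL_TX_CMD_OFFLD_IP_HDR_MASK) <<
			 TX_CMD_OFFLD_IP_HDR;

		oa |= BIT(TX_CMD_OFFLD_L3_EN) | BIT(TX_CMD_OFFLD_L4_EN);
		oa |= (mh_words & IWL_TX_CMD_OFFLD_MH_MASK) <<
			TX_CMD_OFFLD_MH_SIZE;
		if (pad_inserted)
			oa |= BIT(TX_CMD_OFFLD_PAD);

		return cpu_to_le16(oa);
	}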
/* TODO: complete documentation for try_cnt and btkill_cnt */ /* TODO: complete documentation for try_cnt and btkill_cnt */
/** /**
* struct iwl_tx_cmd - TX command struct to FW * struct iwl_tx_cmd - TX command struct to FW
* ( TX_CMD = 0x1c ) * ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details * @len: in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
* @tx_flags: combination of TX_CMD_FLG_* * @tx_flags: combination of TX_CMD_FLG_*
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_* * cleared. Combination of RATE_MCS_*
...@@ -231,7 +262,7 @@ enum iwl_tx_pm_timeouts { ...@@ -231,7 +262,7 @@ enum iwl_tx_pm_timeouts {
*/ */
struct iwl_tx_cmd { struct iwl_tx_cmd {
__le16 len; __le16 len;
__le16 next_frame_len; __le16 offload_assist;
__le32 tx_flags; __le32 tx_flags;
struct { struct {
u8 try_cnt; u8 try_cnt;
...@@ -255,7 +286,7 @@ struct iwl_tx_cmd { ...@@ -255,7 +286,7 @@ struct iwl_tx_cmd {
__le16 reserved4; __le16 reserved4;
u8 payload[0]; u8 payload[0];
struct ieee80211_hdr hdr[0]; struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_3 */ } __packed; /* TX_CMD_API_S_VER_6 */
/* /*
* TX response related data * TX response related data
......
...@@ -80,12 +80,39 @@ ...@@ -80,12 +80,39 @@
#include "fw-api-stats.h" #include "fw-api-stats.h"
#include "fw-api-tof.h" #include "fw-api-tof.h"
/* Tx queue numbers */ /* Tx queue numbers for non-DQA mode */
enum { enum {
IWL_MVM_OFFCHANNEL_QUEUE = 8, IWL_MVM_OFFCHANNEL_QUEUE = 8,
IWL_MVM_CMD_QUEUE = 9, IWL_MVM_CMD_QUEUE = 9,
}; };
/*
* DQA queue numbers
*
* @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
* @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
* @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
* that we are never left without the ability to connect to an AP.
* @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
* Each MGMT queue is mapped to a single STA
* MGMT frames are frames that return true on ieee80211_is_mgmt()
* @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
* @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
* DATA frames are intended for frames that are not management frames
* (!ieee80211_is_mgmt()), but if the MGMT TXQ pool is exhausted, MGMT
* frames can be sent on DATA queues as well
* @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
*/
enum iwl_mvm_dqa_txq {
IWL_MVM_DQA_CMD_QUEUE = 0,
IWL_MVM_DQA_GCAST_QUEUE = 3,
IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
};
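As a hedged illustration of the pool layout documented above (this is not the driver's allocation logic, which also tracks per-queue state and reservations), a frame type maps to a candidate queue range roughly like this:

	/* Illustration only: the DQA queue range a frame would be served
	 * from. MGMT frames use the MGMT pool and may overflow into the
	 * DATA pool when that pool is exhausted. */
	static inline void iwl_mvm_dqa_pool_range(bool is_mgmt, u8 *first, u8 *last)
	{
		if (is_mgmt) {
			*first = IWL_MVM_DQA_MIN_MGMT_QUEUE;
			*last = IWL_MVM_DQA_MAX_MGMT_QUEUE;
		} else {
			*first = IWL_MVM_DQA_MIN_DATA_QUEUE;
			*last = IWL_MVM_DQA_MAX_DATA_QUEUE;
		}
	}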
enum iwl_mvm_tx_fifo { enum iwl_mvm_tx_fifo {
IWL_MVM_TX_FIFO_BK = 0, IWL_MVM_TX_FIFO_BK = 0,
IWL_MVM_TX_FIFO_BE, IWL_MVM_TX_FIFO_BE,
...@@ -279,6 +306,11 @@ enum { ...@@ -279,6 +306,11 @@ enum {
/* Please keep this enum *SORTED* by hex value. /* Please keep this enum *SORTED* by hex value.
* Needed for binary search, otherwise a warning will be triggered. * Needed for binary search, otherwise a warning will be triggered.
*/ */
enum iwl_mac_conf_subcmd_ids {
LINK_QUALITY_MEASUREMENT_CMD = 0x1,
LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE,
};
enum iwl_phy_ops_subcmd_ids { enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
CTDP_CONFIG_CMD = 0x03, CTDP_CONFIG_CMD = 0x03,
...@@ -287,6 +319,10 @@ enum iwl_phy_ops_subcmd_ids { ...@@ -287,6 +319,10 @@ enum iwl_phy_ops_subcmd_ids {
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
}; };
enum iwl_system_subcmd_ids {
SHARED_MEM_CFG_CMD = 0x0,
};
enum iwl_data_path_subcmd_ids { enum iwl_data_path_subcmd_ids {
UPDATE_MU_GROUPS_CMD = 0x1, UPDATE_MU_GROUPS_CMD = 0x1,
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
...@@ -302,6 +338,8 @@ enum iwl_prot_offload_subcmd_ids { ...@@ -302,6 +338,8 @@ enum iwl_prot_offload_subcmd_ids {
enum { enum {
LEGACY_GROUP = 0x0, LEGACY_GROUP = 0x0,
LONG_GROUP = 0x1, LONG_GROUP = 0x1,
SYSTEM_GROUP = 0x2,
MAC_CONF_GROUP = 0x3,
PHY_OPS_GROUP = 0x4, PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5, DATA_PATH_GROUP = 0x5,
PROT_OFFLOAD_GROUP = 0xb, PROT_OFFLOAD_GROUP = 0xb,
...@@ -1923,6 +1961,7 @@ struct iwl_tdls_config_res { ...@@ -1923,6 +1961,7 @@ struct iwl_tdls_config_res {
#define TX_FIFO_MAX_NUM 8 #define TX_FIFO_MAX_NUM 8
#define RX_FIFO_MAX_NUM 2 #define RX_FIFO_MAX_NUM 2
#define TX_FIFO_INTERNAL_MAX_NUM 6
/** /**
* Shared memory configuration information from the FW * Shared memory configuration information from the FW
...@@ -1940,6 +1979,12 @@ struct iwl_tdls_config_res { ...@@ -1940,6 +1979,12 @@ struct iwl_tdls_config_res {
* @page_buff_addr: used by UMAC and performance debug (page miss analysis), * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
* when paging is not supported this should be 0 * when paging is not supported this should be 0
* @page_buff_size: size of %page_buff_addr * @page_buff_size: size of %page_buff_addr
* @rxfifo_addr: start address of the RX FIFO
* @internal_txfifo_addr: start address of the internal TX FIFO
* @internal_txfifo_size: sizes of the internal TX FIFOs
*
* NOTE: on firmware versions that don't have
* IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG set, the last three members
* don't exist.
*/ */
struct iwl_shared_mem_cfg { struct iwl_shared_mem_cfg {
__le32 shared_mem_addr; __le32 shared_mem_addr;
...@@ -1951,7 +1996,10 @@ struct iwl_shared_mem_cfg { ...@@ -1951,7 +1996,10 @@ struct iwl_shared_mem_cfg {
__le32 rxfifo_size[RX_FIFO_MAX_NUM]; __le32 rxfifo_size[RX_FIFO_MAX_NUM];
__le32 page_buff_addr; __le32 page_buff_addr;
__le32 page_buff_size; __le32 page_buff_size;
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */ __le32 rxfifo_addr;
__le32 internal_txfifo_addr;
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
/** /**
* VHT MU-MIMO group configuration * VHT MU-MIMO group configuration
...@@ -2002,4 +2050,60 @@ struct iwl_stored_beacon_notif { ...@@ -2002,4 +2050,60 @@ struct iwl_stored_beacon_notif {
u8 data[MAX_STORED_BEACON_SIZE]; u8 data[MAX_STORED_BEACON_SIZE];
} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */ } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
enum iwl_lqm_cmd_operations {
LQM_CMD_OPERATION_START_MEASUREMENT = 0x01,
LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02,
};
enum iwl_lqm_status {
LQM_STATUS_SUCCESS = 0,
LQM_STATUS_TIMEOUT = 1,
LQM_STATUS_ABORT = 2,
};
/**
* struct iwl_link_qual_msrmnt_cmd - Link Quality Measurement command
* @cmd_operation: command operation to be performed (start or stop)
* as defined above.
* @mac_id: MAC ID the measurement applies to.
* @measurement_time: time of the total measurement to be performed, in uSec.
* @timeout: maximum time allowed until a response is sent, in uSec.
*/
struct iwl_link_qual_msrmnt_cmd {
__le32 cmd_operation;
__le32 mac_id;
__le32 measurement_time;
__le32 timeout;
} __packed /* LQM_CMD_API_S_VER_1 */;
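A minimal sketch of how this command could be built and sent. This is not the driver's iwl_mvm_send_lqm_cmd(), whose prototype appears later in this patch; it only shows how the struct, the MAC_CONF_GROUP command id, and iwl_mvm_send_cmd_pdu()/iwl_cmd_id() (both used elsewhere in this series) fit together. Times are in usec, per the description above.

	/* Sketch only: start an LQM measurement on a given MAC ID. */
	static int lqm_start_sketch(struct iwl_mvm *mvm, u32 mac_id,
				    u32 duration, u32 timeout)
	{
		struct iwl_link_qual_msrmnt_cmd cmd = {
			.cmd_operation =
				cpu_to_le32(LQM_CMD_OPERATION_START_MEASUREMENT),
			.mac_id = cpu_to_le32(mac_id),
			.measurement_time = cpu_to_le32(duration),
			.timeout = cpu_to_le32(timeout),
		};

		return iwl_mvm_send_cmd_pdu(mvm,
					    iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD,
						       MAC_CONF_GROUP, 0),
					    0, sizeof(cmd), &cmd);
	}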
/**
* struct iwl_link_qual_msrmnt_notif - Link Quality Measurement notification
*
* @frequent_stations_air_time: an array containing the total air time
* (in uSec) used by the most frequently transmitting stations.
* @number_of_stations: the number of unique stations included in the array
* (a number between 0 and 16)
* @total_air_time_other_stations: the total air time (uSec) used by all the
* stations which are not included in the above report.
* @time_in_measurement_window: the total time in uSec in which a measurement
* took place.
* @tx_frame_dropped: the number of TX frames dropped due to retry limit during
* measurement
* @mac_id: MAC ID the measurement applies to.
* @status: return status. may be one of the LQM_STATUS_* defined above.
* @reserved: reserved.
*/
struct iwl_link_qual_msrmnt_notif {
__le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT];
__le32 number_of_stations;
__le32 total_air_time_other_stations;
__le32 time_in_measurement_window;
__le32 tx_frame_dropped;
__le32 mac_id;
__le32 status;
__le32 reserved[3];
} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */
#endif /* __fw_api_h__ */ #endif /* __fw_api_h__ */
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
* *
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
* *
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -265,6 +265,65 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, ...@@ -265,6 +265,65 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
*dump_data = iwl_fw_error_next_data(*dump_data); *dump_data = iwl_fw_error_next_data(*dump_data);
} }
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
for (i = 0;
i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
i++) {
/* Select the TXF we're reading now */
iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
fifo_hdr = (void *)(*dump_data)->data;
fifo_data = (void *)fifo_hdr->data;
fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
/* No need to try to read the data if the length is 0 */
if (fifo_len == 0)
continue;
/* Add a TLV for the internal FIFOs */
(*dump_data)->type =
cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
(*dump_data)->len =
cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
fifo_hdr->fifo_num = cpu_to_le32(i);
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_CPU2_FIFO_ITEM_CNT));
fifo_hdr->wr_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_CPU2_WR_PTR));
fifo_hdr->rd_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_CPU2_RD_PTR));
fifo_hdr->fence_ptr =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_CPU2_FENCE_PTR));
fifo_hdr->fence_mode =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
TXF_CPU2_LOCK_FENCE));
/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
iwl_trans_write_prph(mvm->trans,
TXF_CPU2_READ_MODIFY_ADDR,
TXF_CPU2_WR_PTR);
/* Dummy-read to advance the read pointer to head */
iwl_trans_read_prph(mvm->trans,
TXF_CPU2_READ_MODIFY_DATA);
/* Read FIFO */
fifo_len /= sizeof(u32); /* Size in DWORDS */
for (j = 0; j < fifo_len; j++)
fifo_data[j] =
iwl_trans_read_prph(mvm->trans,
TXF_CPU2_READ_MODIFY_DATA);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
}
iwl_trans_release_nic_access(mvm->trans, &flags); iwl_trans_release_nic_access(mvm->trans, &flags);
} }
...@@ -429,9 +488,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -429,9 +488,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct iwl_fw_error_dump_trigger_desc *dump_trig; struct iwl_fw_error_dump_trigger_desc *dump_trig;
struct iwl_mvm_dump_ptrs *fw_error_dump; struct iwl_mvm_dump_ptrs *fw_error_dump;
u32 sram_len, sram_ofs; u32 sram_len, sram_ofs;
struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
mvm->fw->dbg_mem_tlv;
u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
u32 smem_len = mvm->cfg->smem_len; u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len;
u32 sram2_len = mvm->cfg->dccm2_len; u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len;
bool monitor_dump_only = false; bool monitor_dump_only = false;
int i; int i;
...@@ -494,6 +555,22 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -494,6 +555,22 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
sizeof(struct iwl_fw_error_dump_fifo); sizeof(struct iwl_fw_error_dump_fifo);
} }
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
for (i = 0;
i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
i++) {
if (!mem_cfg->internal_txfifo_size[i])
continue;
/* Add header info */
fifo_data_len +=
mem_cfg->internal_txfifo_size[i] +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
}
/* Make room for PRPH registers */ /* Make room for PRPH registers */
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
/* The range includes both boundaries */ /* The range includes both boundaries */
...@@ -511,7 +588,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -511,7 +588,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
file_len = sizeof(*dump_file) + file_len = sizeof(*dump_file) +
sizeof(*dump_data) * 2 + sizeof(*dump_data) * 2 +
sram_len + sizeof(*dump_mem) +
fifo_data_len + fifo_data_len +
prph_len + prph_len +
radio_len + radio_len +
...@@ -525,6 +601,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -525,6 +601,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
if (sram2_len) if (sram2_len)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
/* Make room for MEM segments */
for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
if (fw_dbg_mem[i])
file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
le32_to_cpu(fw_dbg_mem[i]->len);
}
/* Make room for fw's virtual image pages, if it exists */ /* Make room for fw's virtual image pages, if it exists */
if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
file_len += mvm->num_of_paging_blk * file_len += mvm->num_of_paging_blk *
...@@ -550,6 +633,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -550,6 +633,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) + file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
mvm->fw_dump_desc->len; mvm->fw_dump_desc->len;
if (!mvm->fw->dbg_dynamic_mem)
file_len += sram_len + sizeof(*dump_mem);
dump_file = vzalloc(file_len); dump_file = vzalloc(file_len);
if (!dump_file) { if (!dump_file) {
kfree(fw_error_dump); kfree(fw_error_dump);
...@@ -599,16 +685,36 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -599,16 +685,36 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
if (monitor_dump_only) if (monitor_dump_only)
goto dump_trans_data; goto dump_trans_data;
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); if (!mvm->fw->dbg_dynamic_mem) {
dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_mem = (void *)dump_data->data; dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); dump_mem = (void *)dump_data->data;
dump_mem->offset = cpu_to_le32(sram_ofs); dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, dump_mem->offset = cpu_to_le32(sram_ofs);
sram_len); iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
sram_len);
dump_data = iwl_fw_error_next_data(dump_data);
}
for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
if (fw_dbg_mem[i]) {
u32 len = le32_to_cpu(fw_dbg_mem[i]->len);
u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(len +
sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
dump_mem->type = fw_dbg_mem[i]->data_type;
dump_mem->offset = cpu_to_le32(ofs);
iwl_trans_read_mem_bytes(mvm->trans, ofs,
dump_mem->data,
len);
dump_data = iwl_fw_error_next_data(dump_data);
}
}
if (smem_len) { if (smem_len) {
dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data; dump_mem = (void *)dump_data->data;
...@@ -616,10 +722,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -616,10 +722,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset); dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset, iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
dump_mem->data, smem_len); dump_mem->data, smem_len);
dump_data = iwl_fw_error_next_data(dump_data);
} }
if (sram2_len) { if (sram2_len) {
dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data; dump_mem = (void *)dump_data->data;
...@@ -627,11 +733,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -627,11 +733,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset); dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset, iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
dump_mem->data, sram2_len); dump_mem->data, sram2_len);
dump_data = iwl_fw_error_next_data(dump_data);
} }
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 && if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) { CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN + dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
sizeof(*dump_mem)); sizeof(*dump_mem));
...@@ -640,6 +746,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -640,6 +746,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET); dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET, iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
dump_mem->data, IWL8260_ICCM_LEN); dump_mem->data, IWL8260_ICCM_LEN);
dump_data = iwl_fw_error_next_data(dump_data);
} }
/* Dump fw's virtual image */ /* Dump fw's virtual image */
...@@ -649,7 +756,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -649,7 +756,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct page *pages = struct page *pages =
mvm->fw_paging_db[i].fw_paging_block; mvm->fw_paging_db[i].fw_paging_block;
dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
dump_data->len = cpu_to_le32(sizeof(*paging) + dump_data->len = cpu_to_le32(sizeof(*paging) +
PAGING_BLOCK_SIZE); PAGING_BLOCK_SIZE);
...@@ -657,10 +763,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -657,10 +763,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
paging->index = cpu_to_le32(i); paging->index = cpu_to_le32(i);
memcpy(paging->data, page_address(pages), memcpy(paging->data, page_address(pages),
PAGING_BLOCK_SIZE); PAGING_BLOCK_SIZE);
dump_data = iwl_fw_error_next_data(dump_data);
} }
} }
dump_data = iwl_fw_error_next_data(dump_data);
if (prph_len) if (prph_len)
iwl_dump_prph(mvm->trans, &dump_data); iwl_dump_prph(mvm->trans, &dump_data);
......
...@@ -64,6 +64,7 @@ ...@@ -64,6 +64,7 @@
* *
*****************************************************************************/ *****************************************************************************/
#include <net/mac80211.h> #include <net/mac80211.h>
#include <linux/netdevice.h>
#include "iwl-trans.h" #include "iwl-trans.h"
#include "iwl-op-mode.h" #include "iwl-op-mode.h"
...@@ -114,14 +115,18 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) ...@@ -114,14 +115,18 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
struct iwl_rss_config_cmd cmd = { struct iwl_rss_config_cmd cmd = {
.flags = cpu_to_le32(IWL_RSS_ENABLE), .flags = cpu_to_le32(IWL_RSS_ENABLE),
.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
IWL_RSS_HASH_TYPE_IPV4_UDP |
IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
IWL_RSS_HASH_TYPE_IPV6_TCP | IWL_RSS_HASH_TYPE_IPV6_TCP |
IWL_RSS_HASH_TYPE_IPV6_UDP |
IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
}; };
/* Do not direct RSS traffic to Q 0 which is our fallback queue */
for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
cmd.indirection_table[i] = i % mvm->trans->num_rx_queues; cmd.indirection_table[i] =
memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key)); 1 + (i % (mvm->trans->num_rx_queues - 1));
netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
} }
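A small worked example of the indirection formula above; the queue count is assumed for illustration only:

	/* Worked example (assumed num_rx_queues == 4): the entries cycle
	 * 1, 2, 3, 1, 2, 3, ... so RX queue 0 stays free as the fallback. */
	static void rss_table_example(void)
	{
		u8 table[8];
		int i, num_rx_queues = 4;	/* assumed for illustration */

		for (i = 0; i < 8; i++)
			table[i] = 1 + (i % (num_rx_queues - 1));
		/* table = { 1, 2, 3, 1, 2, 3, 1, 2 }; queue 0 never used */
	}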
...@@ -174,8 +179,12 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) ...@@ -174,8 +179,12 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
} }
} }
if (sec_idx >= IWL_UCODE_SECTION_MAX) { /*
IWL_ERR(mvm, "driver didn't find paging image\n"); * If paging is enabled there should be at least 2 more sections left
* (one for CSS and one for Paging data)
*/
if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
iwl_free_fw_paging(mvm); iwl_free_fw_paging(mvm);
return -EINVAL; return -EINVAL;
} }
...@@ -410,7 +419,9 @@ static int iwl_trans_get_paging_item(struct iwl_mvm *mvm) ...@@ -410,7 +419,9 @@ static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
goto exit; goto exit;
} }
mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE, /* Add an extra page for headers */
mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
FW_PAGING_SIZE,
GFP_KERNEL); GFP_KERNEL);
if (!mvm->trans->paging_download_buf) { if (!mvm->trans->paging_download_buf) {
ret = -ENOMEM; ret = -ENOMEM;
...@@ -641,7 +652,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, ...@@ -641,7 +652,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/ */
memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1; if (iwl_mvm_is_dqa_supported(mvm))
mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
else
mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
for (i = 0; i < IEEE80211_MAX_QUEUES; i++) for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0); atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
...@@ -788,17 +802,22 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ...@@ -788,17 +802,22 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{ {
struct iwl_host_cmd cmd = { struct iwl_host_cmd cmd = {
.id = SHARED_MEM_CFG,
.flags = CMD_WANT_SKB, .flags = CMD_WANT_SKB,
.data = { NULL, }, .data = { NULL, },
.len = { 0, }, .len = { 0, },
}; };
struct iwl_rx_packet *pkt;
struct iwl_shared_mem_cfg *mem_cfg; struct iwl_shared_mem_cfg *mem_cfg;
struct iwl_rx_packet *pkt;
u32 i; u32 i;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
else
cmd.id = SHARED_MEM_CFG;
if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd))) if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
return; return;
...@@ -824,6 +843,25 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) ...@@ -824,6 +843,25 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
le32_to_cpu(mem_cfg->page_buff_addr); le32_to_cpu(mem_cfg->page_buff_addr);
mvm->shared_mem_cfg.page_buff_size = mvm->shared_mem_cfg.page_buff_size =
le32_to_cpu(mem_cfg->page_buff_size); le32_to_cpu(mem_cfg->page_buff_size);
/* new API has more data */
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
mvm->shared_mem_cfg.rxfifo_addr =
le32_to_cpu(mem_cfg->rxfifo_addr);
mvm->shared_mem_cfg.internal_txfifo_addr =
le32_to_cpu(mem_cfg->internal_txfifo_addr);
BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
for (i = 0;
i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
i++)
mvm->shared_mem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n"); IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
iwl_free_resp(&cmd); iwl_free_resp(&cmd);
......
...@@ -252,10 +252,14 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, ...@@ -252,10 +252,14 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
.exclude_vif = exclude_vif, .exclude_vif = exclude_vif,
.used_hw_queues = .used_hw_queues =
BIT(IWL_MVM_OFFCHANNEL_QUEUE) | BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
BIT(mvm->aux_queue) | BIT(mvm->aux_queue),
BIT(IWL_MVM_CMD_QUEUE),
}; };
if (iwl_mvm_is_dqa_supported(mvm))
data.used_hw_queues |= BIT(IWL_MVM_DQA_CMD_QUEUE);
else
data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE);
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
/* mark all VIF used hw queues */ /* mark all VIF used hw queues */
...@@ -425,12 +429,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, ...@@ -425,12 +429,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
return 0; return 0;
} }
/* Find available queues, and allocate them to the ACs */ /*
* Find available queues, and allocate them to the ACs. When in
* DQA-mode they aren't really used, and this is done only so the
* mac80211 ieee80211_check_queues() function won't fail
*/
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
u8 queue = find_first_zero_bit(&used_hw_queues, u8 queue = find_first_zero_bit(&used_hw_queues,
mvm->first_agg_queue); mvm->first_agg_queue);
if (queue >= mvm->first_agg_queue) { if (!iwl_mvm_is_dqa_supported(mvm) &&
queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate queue\n"); IWL_ERR(mvm, "Failed to allocate queue\n");
ret = -EIO; ret = -EIO;
goto exit_fail; goto exit_fail;
...@@ -442,13 +451,19 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, ...@@ -442,13 +451,19 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
/* Allocate the CAB queue for softAP and GO interfaces */ /* Allocate the CAB queue for softAP and GO interfaces */
if (vif->type == NL80211_IFTYPE_AP) { if (vif->type == NL80211_IFTYPE_AP) {
u8 queue = find_first_zero_bit(&used_hw_queues, u8 queue;
mvm->first_agg_queue);
if (queue >= mvm->first_agg_queue) { if (!iwl_mvm_is_dqa_supported(mvm)) {
IWL_ERR(mvm, "Failed to allocate cab queue\n"); queue = find_first_zero_bit(&used_hw_queues,
ret = -EIO; mvm->first_agg_queue);
goto exit_fail;
if (queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate cab queue\n");
ret = -EIO;
goto exit_fail;
}
} else {
queue = IWL_MVM_DQA_GCAST_QUEUE;
} }
vif->cab_queue = queue; vif->cab_queue = queue;
...@@ -495,6 +510,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) ...@@ -495,6 +510,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
/* fall through */ /* fall through */
default: default:
/* If DQA is supported - queues will be enabled when needed */
if (iwl_mvm_is_dqa_supported(mvm))
break;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac], iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
vif->hw_queue[ac], vif->hw_queue[ac],
...@@ -523,6 +542,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) ...@@ -523,6 +542,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
IWL_MAX_TID_COUNT, 0); IWL_MAX_TID_COUNT, 0);
/* fall through */ /* fall through */
default: default:
/*
* If DQA is supported - queues were already disabled, since in
* DQA-mode the queues are a property of the STA and not of the
* vif, and at this point the STA was already deleted
*/
if (iwl_mvm_is_dqa_supported(mvm))
break;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
vif->hw_queue[ac], vif->hw_queue[ac],
......
...@@ -665,12 +665,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ...@@ -665,12 +665,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
} }
hw->netdev_features |= mvm->cfg->features; hw->netdev_features |= mvm->cfg->features;
if (!iwl_mvm_is_csum_supported(mvm)) if (!iwl_mvm_is_csum_supported(mvm)) {
hw->netdev_features &= ~NETIF_F_RXCSUM; hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
NETIF_F_RXCSUM);
if (IWL_MVM_SW_TX_CSUM_OFFLOAD) /* We may support SW TX CSUM */
hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
NETIF_F_TSO | NETIF_F_TSO6; hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
}
ret = ieee80211_register_hw(mvm->hw); ret = ieee80211_register_hw(mvm->hw);
if (ret) if (ret)
...@@ -992,6 +993,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) ...@@ -992,6 +993,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm); iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained)); memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
...@@ -1178,6 +1180,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) ...@@ -1178,6 +1180,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
flush_work(&mvm->d0i3_exit_work); flush_work(&mvm->d0i3_exit_work);
flush_work(&mvm->async_handlers_wk); flush_work(&mvm->async_handlers_wk);
flush_work(&mvm->add_stream_wk);
cancel_delayed_work_sync(&mvm->fw_dump_wk); cancel_delayed_work_sync(&mvm->fw_dump_wk);
iwl_mvm_free_fw_dump_desc(mvm); iwl_mvm_free_fw_dump_desc(mvm);
...@@ -1821,6 +1824,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, ...@@ -1821,6 +1824,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
mvmvif->lqm_active)
iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
0, 0);
/* /*
* If we're not associated yet, take the (new) BSSID before associating * If we're not associated yet, take the (new) BSSID before associating
* so the firmware knows. If we're already associated, then use the old * so the firmware knows. If we're already associated, then use the old
...@@ -2340,7 +2348,8 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -2340,7 +2348,8 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return; return;
} }
if (iwlwifi_mod_params.uapsd_disable) { if (!vif->p2p &&
(iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
return; return;
} }
...@@ -2376,6 +2385,22 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, ...@@ -2376,6 +2385,22 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
peer_addr, action); peer_addr, action);
} }
static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta)
{
struct iwl_mvm_tid_data *tid_data;
struct sk_buff *skb;
int i;
spin_lock_bh(&mvm_sta->lock);
for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
tid_data = &mvm_sta->tid_data[i];
while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
ieee80211_free_txskb(mvm->hw, skb);
}
spin_unlock_bh(&mvm_sta->lock);
}
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct ieee80211_sta *sta,
...@@ -2396,6 +2421,33 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, ...@@ -2396,6 +2421,33 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
/* if a STA is being removed, reuse its ID */ /* if a STA is being removed, reuse its ID */
flush_work(&mvm->sta_drained_wk); flush_work(&mvm->sta_drained_wk);
/*
* If we are in a STA removal flow and in DQA mode:
*
* This is after the sync_rcu part, so the queues have already been
* flushed. No more TXs on their way in mac80211's path, and no more in
* the queues.
* Also, we won't be getting any new TX frames for this station.
* What we might have are deferred TX frames that need to be taken care
* of.
*
* Drop any still-queued deferred-frame before removing the STA, and
* make sure the worker is no longer handling frames for this STA.
*/
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST &&
iwl_mvm_is_dqa_supported(mvm)) {
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
flush_work(&mvm->add_stream_wk);
/*
* No need to make sure deferred TX indication is off since the
* worker will already remove it if it was on
*/
}
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
if (old_state == IEEE80211_STA_NOTEXIST && if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) { new_state == IEEE80211_STA_NONE) {
...@@ -3628,6 +3680,11 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, ...@@ -3628,6 +3680,11 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
break; break;
case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_STATION:
if (mvmvif->lqm_active)
iwl_mvm_send_lqm_cmd(vif,
LQM_CMD_OPERATION_STOP_MEASUREMENT,
0, 0);
/* Schedule the time event to a bit before beacon 1, /* Schedule the time event to a bit before beacon 1,
* to make sure we're in the new channel when the * to make sure we're in the new channel when the
* GO/AP arrives. * GO/AP arrives.
...@@ -3727,6 +3784,10 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, ...@@ -3727,6 +3784,10 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
if (!vif || vif->type != NL80211_IFTYPE_STATION) if (!vif || vif->type != NL80211_IFTYPE_STATION)
return; return;
/* Make sure we're done with the deferred traffic before flushing */
if (iwl_mvm_is_dqa_supported(mvm))
flush_work(&mvm->add_stream_wk);
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif = iwl_mvm_vif_from_mac80211(vif);
......
...@@ -208,7 +208,7 @@ enum iwl_power_scheme { ...@@ -208,7 +208,7 @@ enum iwl_power_scheme {
}; };
#define IWL_CONN_MAX_LISTEN_INTERVAL 10 #define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2 #define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
enum iwl_dbgfs_pm_mask { enum iwl_dbgfs_pm_mask {
...@@ -453,6 +453,12 @@ struct iwl_mvm_vif { ...@@ -453,6 +453,12 @@ struct iwl_mvm_vif {
/* TCP Checksum Offload */ /* TCP Checksum Offload */
netdev_features_t features; netdev_features_t features;
/*
* link quality measurement - used to check whether this interface
* is in the middle of a link quality measurement
*/
bool lqm_active;
}; };
static inline struct iwl_mvm_vif * static inline struct iwl_mvm_vif *
...@@ -602,6 +608,9 @@ struct iwl_mvm_shared_mem_cfg { ...@@ -602,6 +608,9 @@ struct iwl_mvm_shared_mem_cfg {
u32 rxfifo_size[RX_FIFO_MAX_NUM]; u32 rxfifo_size[RX_FIFO_MAX_NUM];
u32 page_buff_addr; u32 page_buff_addr;
u32 page_buff_size; u32 page_buff_size;
u32 rxfifo_addr;
u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
}; };
struct iwl_mvm { struct iwl_mvm {
...@@ -656,10 +665,17 @@ struct iwl_mvm { ...@@ -656,10 +665,17 @@ struct iwl_mvm {
/* Map to HW queue */ /* Map to HW queue */
u32 hw_queue_to_mac80211; u32 hw_queue_to_mac80211;
u8 hw_queue_refcount; u8 hw_queue_refcount;
u8 ra_sta_id; /* The RA this queue is mapped to, if any */
/*
* This marks that the queue is reserved for a STA but not yet
* allocated. It is needed to make sure we always have at least one
* queue available to use when adding a new STA
*/
bool setup_reserved; bool setup_reserved;
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
} queue_info[IWL_MAX_HW_QUEUES]; } queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
struct work_struct add_stream_wk; /* To add streams to queues */
atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
const char *nvm_file_name; const char *nvm_file_name;
...@@ -679,11 +695,11 @@ struct iwl_mvm { ...@@ -679,11 +695,11 @@ struct iwl_mvm {
struct iwl_rx_phy_info last_phy_info; struct iwl_rx_phy_info last_phy_info;
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
struct work_struct sta_drained_wk; struct work_struct sta_drained_wk;
unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
 	atomic_t pending_frames[IWL_MVM_STATION_COUNT];
 	u32 tfd_drained[IWL_MVM_STATION_COUNT];
 	u8 rx_ba_sessions;
-	u32 secret_key[IWL_RSS_HASH_KEY_CNT];

 	/* configured by mac80211 */
 	u32 rts_threshold;
@@ -694,6 +710,7 @@ struct iwl_mvm {
 	struct iwl_mcast_filter_cmd *mcast_filter_cmd;
 	enum iwl_mvm_scan_type scan_type;
 	enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
+	struct timer_list scan_timer;

 	/* max number of simultaneous scans the FW supports */
 	unsigned int max_scans;
@@ -1063,7 +1080,8 @@ bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm)
 {
 	return fw_has_capa(&mvm->fw->ucode_capa,
 			   IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) &&
-		IWL_MVM_P2P_UAPSD_STANDALONE;
+	       !(iwlwifi_mod_params.uapsd_disable &
+		 IWL_DISABLE_UAPSD_P2P_CLIENT);
 }

 static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
@@ -1297,6 +1315,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm);
 int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
+void iwl_mvm_scan_timeout(unsigned long data);

 /* Scheduled scan */
 void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
@@ -1453,22 +1472,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 			   struct ieee80211_tx_info *info, u8 ac);
-bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
-void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
-int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-				  struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-			       enum ieee80211_rssi_event_data);
-u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
-				    struct ieee80211_sta *sta);
-bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
-					 struct ieee80211_sta *sta);
-bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
-					enum ieee80211_band band);
-void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-				       struct iwl_rx_cmd_buffer *rxb);

 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void
@@ -1634,4 +1637,10 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			     const char *errmsg);

+/* Link Quality Measurement */
+int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
+			 enum iwl_lqm_cmd_operatrions operation,
+			 u32 duration, u32 timeout);
+bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
+
 #endif /* __IWL_MVM_H__ */
@@ -292,7 +292,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
 		   RX_HANDLER_ASYNC_LOCKED),
 	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
-		       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED),
+		       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
 	RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
 		       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
@@ -418,6 +418,21 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
 	HCMD_NAME(REPLY_DEBUG_CMD),
 };

+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
+	HCMD_NAME(SHARED_MEM_CFG_CMD),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+	HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD),
+	HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF),
+};
+
 /* Please keep this array *SORTED* by hex value.
  * Access is done through binary search
  */
@@ -449,6 +464,8 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
 	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
 	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
+	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
+	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
 	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
 	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
 	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
@@ -562,6 +579,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
 	INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
 	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
+	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);

 	spin_lock_init(&mvm->d0i3_tx_lock);
 	spin_lock_init(&mvm->refs_lock);
@@ -601,7 +619,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	trans_cfg.command_groups = iwl_mvm_groups;
 	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);

-	trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
+	if (iwl_mvm_is_dqa_supported(mvm))
+		trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+	else
+		trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
 	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
 	trans_cfg.scd_set_active = true;
@@ -707,8 +728,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	iwl_mvm_tof_init(mvm);

-	/* init RSS hash key */
-	get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key));
+	setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout,
+		    (unsigned long)mvm);

 	return op_mode;
@@ -765,6 +786,11 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 	iwl_mvm_tof_clean(mvm);

+	del_timer_sync(&mvm->scan_timer);
+
+	mutex_destroy(&mvm->mutex);
+	mutex_destroy(&mvm->d0i3_suspend_mutex);
+
 	ieee80211_free_hw(mvm->hw);
 }
...
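The two host-command name arrays added above (iwl_mvm_system_names and iwl_mvm_mac_conf_names) must stay sorted by hex value because, as their comments say, lookups go through a binary search. A minimal userspace sketch of that lookup pattern follows; the trimmed struct and helper names here are illustrative, not the driver's actual lookup code:

#include <stdlib.h>

struct hcmd_name {
	unsigned char cmd_id;	/* command id within the group */
	const char *cmd_name;	/* printable name */
};

/* Comparison callback for bsearch(): key is a command id. */
static int cmp_hcmd(const void *key, const void *elt)
{
	const unsigned char *id = key;
	const struct hcmd_name *entry = elt;

	return *id - entry->cmd_id;
}

/* Look up a command name in a *sorted* array; NULL if unknown. */
static const char *hcmd_lookup(const struct hcmd_name *arr, size_t len,
			       unsigned char id)
{
	const struct hcmd_name *found =
		bsearch(&id, arr, len, sizeof(*arr), cmp_hcmd);

	return found ? found->cmd_name : NULL;
}

An out-of-order entry silently breaks bsearch(), which is why the comment above each array insists on the ordering.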
@@ -227,7 +227,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
 			cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
 	}

-	cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+	cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;

 	if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
 	    cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
...
@@ -294,10 +294,15 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+	u16 flags = le16_to_cpu(desc->l3l4_flags);
+	u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+			  IWL_RX_L3_PROTO_POS);

 	if (mvmvif->features & NETIF_F_RXCSUM &&
-	    desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) &&
-	    desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK))
+	    flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+	    (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+	     l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+	     l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
...
@@ -70,6 +70,7 @@
 #include "mvm.h"
 #include "fw-api-scan.h"
+#include "iwl-io.h"

 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
@@ -398,6 +399,10 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
 		ieee80211_scan_completed(mvm->hw,
 				scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+		del_timer(&mvm->scan_timer);
+	} else {
+		IWL_ERR(mvm,
+			"got scan complete notification but no scan is running\n");
 	}

 	mvm->last_ebs_successful =
@@ -961,6 +966,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 					 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
 					 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
 					 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+					 SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
 					 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
 					 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
 					 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
@@ -1216,6 +1222,18 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
 	return -EIO;
 }

+#define SCAN_TIMEOUT (16 * HZ)
+
+void iwl_mvm_scan_timeout(unsigned long data)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)data;
+
+	IWL_ERR(mvm, "regular scan timed out\n");
+
+	del_timer(&mvm->scan_timer);
+	iwl_force_nmi(mvm->trans);
+}
+
 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			   struct cfg80211_scan_request *req,
 			   struct ieee80211_scan_ies *ies)
@@ -1295,6 +1313,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
 	iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);

+	mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT);
+
 	return 0;
 }
@@ -1412,6 +1432,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
 	if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
 		ieee80211_scan_completed(mvm->hw, aborted);
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+		del_timer(&mvm->scan_timer);
 	} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
 		ieee80211_sched_scan_stopped(mvm->hw);
 		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
@@ -1607,6 +1628,7 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
 		 * to release the scan reference here.
 		 */
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+		del_timer(&mvm->scan_timer);
 		if (notify)
 			ieee80211_scan_completed(mvm->hw, true);
 	} else if (notify) {
...
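The scan watchdog threaded through the hunks above follows the stock timer_list lifecycle of this kernel era: setup_timer() once at op-mode start, mod_timer() when a regular scan is issued, del_timer() on every completion path, and del_timer_sync() at teardown. A condensed sketch of that pattern, with a stand-in context struct instead of struct iwl_mvm:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

#define SCAN_TIMEOUT (16 * HZ)

struct scan_ctx {
	struct timer_list timer;
};

/* Fires only if no scan-complete notification disarmed the timer. */
static void scan_timeout_cb(unsigned long data)
{
	struct scan_ctx *ctx = (struct scan_ctx *)data;

	pr_warn("scan timed out\n");	/* the driver escalates to an NMI */
	(void)ctx;
}

static void scan_ctx_init(struct scan_ctx *ctx)
{
	setup_timer(&ctx->timer, scan_timeout_cb, (unsigned long)ctx);
}

static void scan_started(struct scan_ctx *ctx)
{
	mod_timer(&ctx->timer, jiffies + SCAN_TIMEOUT);	/* arm or re-arm */
}

static void scan_completed(struct scan_ctx *ctx)
{
	del_timer(&ctx->timer);		/* notification beat the watchdog */
}

static void scan_ctx_teardown(struct scan_ctx *ctx)
{
	del_timer_sync(&ctx->timer);	/* wait out a concurrent callback */
}

Forcing an NMI on timeout, as the real callback does, is the deliberate design choice here: it makes a firmware that never answers produce an error dump instead of a silently hung scan.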
@@ -193,7 +193,7 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
 		}
 	}

-	if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
+	if (sta) {
 		BUILD_BUG_ON(sizeof(sf_full_timeout) !=
 			     sizeof(__le32) * SF_NUM_SCENARIO *
 			     SF_NUM_TIMEOUT_TYPES);
@@ -220,9 +220,6 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
 	struct ieee80211_sta *sta;
 	int ret = 0;

-	if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
-		sf_cmd.state = cpu_to_le32(new_state);
-
 	if (mvm->cfg->disable_dummy_notification)
 		sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
@@ -235,8 +232,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
 	switch (new_state) {
 	case SF_UNINIT:
-		if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
-			iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+		iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
 		break;
 	case SF_FULL_ON:
 		if (sta_id == IWL_MVM_STATION_COUNT) {
...
@@ -111,7 +111,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,

 /* send station add/update command to firmware */
 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			   bool update)
+			   bool update, unsigned int flags)
 {
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
@@ -126,9 +126,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	u32 status;
 	u32 agg_size = 0, mpdu_dens = 0;

-	if (!update) {
+	if (!update || (flags & STA_MODIFY_QUEUES)) {
 		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
 		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+
+		if (flags & STA_MODIFY_QUEUES)
+			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
 	}

 	switch (sta->bandwidth) {
@@ -274,6 +277,211 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
 		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
 }

+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
+				   struct ieee80211_sta *sta, u8 ac, int tid,
+				   struct ieee80211_hdr *hdr)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
+		.sta_id = mvmsta->sta_id,
+		.tid = tid,
+		.frame_limit = IWL_FRAME_LIMIT,
+	};
+	unsigned int wdg_timeout =
+		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+	u8 mac_queue = mvmsta->vif->hw_queue[ac];
+	int queue = -1;
+	int ssn;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/*
+	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+	 * exists
+	 */
+	if (!ieee80211_is_data_qos(hdr->frame_control) ||
+	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
+						IWL_MVM_DQA_MAX_MGMT_QUEUE);
+		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
+			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
+					    queue);
+
+		/* If no such queue is found, we'll use a DATA queue instead */
+	}
+
+	if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+		queue = mvmsta->reserved_queue;
+		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+	}
+
+	if (queue < 0)
+		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+						IWL_MVM_DQA_MAX_DATA_QUEUE);
+	if (queue >= 0)
+		mvm->queue_info[queue].setup_reserved = false;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* TODO: support shared queues for same RA */
+	if (queue < 0)
+		return -ENOSPC;
+
+	/*
+	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
+	 * as aggregatable.
+	 * Mark all DATA queues as allowing to be aggregated at some point
+	 */
+	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
+			    queue, mvmsta->sta_id, tid);
+
+	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
+			   wdg_timeout);
+
+	spin_lock_bh(&mvmsta->lock);
+	mvmsta->tid_data[tid].txq_id = queue;
+	mvmsta->tfd_queue_msk |= BIT(queue);
+
+	if (mvmsta->reserved_queue == queue)
+		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+	spin_unlock_bh(&mvmsta->lock);
+
+	return iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+}
+
+static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
+{
+	if (tid == IWL_MAX_TID_COUNT)
+		return IEEE80211_AC_VO; /* MGMT */
+
+	return tid_to_mac80211_ac[tid];
+}
+
+static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
+				       struct ieee80211_sta *sta, int tid)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff_head deferred_tx;
+	u8 mac_queue;
+	bool no_queue = false; /* Marks if there is a problem with the queue */
+	u8 ac;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	skb = skb_peek(&tid_data->deferred_tx_frames);
+	if (!skb)
+		return;
+	hdr = (void *)skb->data;
+
+	ac = iwl_mvm_tid_to_ac_queue(tid);
+	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
+
+	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
+	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
+		IWL_ERR(mvm,
+			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
+			mvmsta->sta_id, tid);
+
+		/*
+		 * Mark queue as problematic so later the deferred traffic is
+		 * freed, as we can do nothing with it
+		 */
+		no_queue = true;
+	}
+
+	__skb_queue_head_init(&deferred_tx);
+
+	/* Disable bottom-halves when entering TX path */
+	local_bh_disable();
+	spin_lock(&mvmsta->lock);
+	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
+	spin_unlock(&mvmsta->lock);
+
+	while ((skb = __skb_dequeue(&deferred_tx)))
+		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
+			ieee80211_free_txskb(mvm->hw, skb);
+	local_bh_enable();
+
+	/* Wake queue */
+	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
+}
+
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+{
+	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+					   add_stream_wk);
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	unsigned long deferred_tid_traffic;
+	int sta_id, tid;
+
+	mutex_lock(&mvm->mutex);
+
+	/* Go over all stations with deferred traffic */
+	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
+			 IWL_MVM_STATION_COUNT) {
+		clear_bit(sta_id, mvm->sta_deferred_frames);
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+						lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+
+		for_each_set_bit(tid, &deferred_tid_traffic,
+				 IWL_MAX_TID_COUNT + 1)
+			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
+	}
+
+	mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
+				      struct ieee80211_sta *sta,
+				      enum nl80211_iftype vif_type)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	int queue;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/* Make sure we have free resources for this STA */
+	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
+	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].setup_reserved)
+		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+	else
+		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+						IWL_MVM_DQA_MAX_DATA_QUEUE);
+	if (queue < 0) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm, "No available queues for new station\n");
+		return -ENOSPC;
+	}
+	mvm->queue_info[queue].setup_reserved = true;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	mvmsta->reserved_queue = queue;
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
+			    queue, mvmsta->sta_id);
+
+	return 0;
+}
+
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		    struct ieee80211_vif *vif,
 		    struct ieee80211_sta *sta)
@@ -314,18 +522,29 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		ret = iwl_mvm_tdls_sta_init(mvm, sta);
 		if (ret)
 			return ret;
-	} else {
+	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
 		for (i = 0; i < IEEE80211_NUM_ACS; i++)
 			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
 				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
 	}

 	/* for HW restart - reset everything but the sequence number */
-	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
 		u16 seq = mvm_sta->tid_data[i].seq_number;
 		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
 		mvm_sta->tid_data[i].seq_number = seq;
+
+		if (!iwl_mvm_is_dqa_supported(mvm))
+			continue;
+
+		/*
+		 * Mark all queues for this STA as unallocated and defer TX
+		 * frames until the queue is allocated
+		 */
+		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
 	}
+	mvm_sta->deferred_traffic_tid_map = 0;
 	mvm_sta->agg_tids = 0;

 	if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -338,7 +557,14 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		mvm_sta->dup_data = dup_data;
 	}

-	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
+						 ieee80211_vif_type_p2p(vif));
+		if (ret)
+			goto err;
+	}
+
+	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
 	if (ret)
 		goto err;
@@ -364,7 +590,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
 		       struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta)
 {
-	return iwl_mvm_sta_send_to_fw(mvm, sta, true);
+	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
 }

 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
@@ -509,6 +735,26 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
 	mutex_unlock(&mvm->mutex);
 }

+static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif,
+				       struct iwl_mvm_sta *mvm_sta)
+{
+	int ac;
+	int i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
+			continue;
+
+		ac = iwl_mvm_tid_to_ac_queue(i);
+		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
+				    vif->hw_queue[ac], i, 0);
+		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+	}
+}
+
 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		   struct ieee80211_vif *vif,
 		   struct ieee80211_sta *sta)
@@ -537,6 +783,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 			return ret;
 		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

+		/* If DQA is supported - the queues can be disabled now */
+		if (iwl_mvm_is_dqa_supported(mvm))
+			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
 		/* if we are associated - we can't remove the AP STA now */
 		if (vif->bss_conf.assoc)
 			return ret;
...
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -79,6 +79,60 @@
 struct iwl_mvm;
 struct iwl_mvm_vif;

+/**
+ * DOC: DQA - Dynamic Queue Allocation - introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in iwlwifi
+ * driver to allow dynamic allocation of queues on-demand, rather than allocate
+ * them statically ahead of time. Ideally, we would like to allocate one queue
+ * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
+ * even if it also needs to send traffic to a sleeping STA1, without being
+ * blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ *	TXQ #0 - command queue
+ *	TXQ #1 - aux frames
+ *	TXQ #2 - P2P device frames
+ *	TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
+ *	TXQ #4 - BSS DATA frames queue
+ *	TXQ #5-8 - Non-QoS and MGMT frames queue pool
+ *	TXQ #9 - P2P GO/SoftAP probe responses
+ *	TXQ #10-31 - DATA frames queue pool
+ * The queues are dynamically taken from either the MGMT frames queue pool or
+ * the DATA frames one. See the %iwl_mvm_dqa_txq for more information on every
+ * queue.
+ *
+ * When a frame for a previously unseen RA/TID comes in, it needs to be deferred
+ * until a queue is allocated for it, and only then can be TXed. Therefore, it
+ * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
+ * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
+ *
+ * For convenience, MGMT is considered as if it has TID=8, and goes to the MGMT
+ * queues in the pool. If there is no longer a free MGMT queue to allocate, a
+ * queue will be allocated from the DATA pool instead. Since QoS NDPs can create
+ * a problem for aggregations, they too will use a MGMT queue.
+ *
+ * When adding a STA, a DATA queue is reserved for it so that it can TX from
+ * it. If no such free queue exists for reserving, the STA addition will fail.
+ *
+ * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
+ * new RA/TID comes in for an existing STA, one of the STA's queues will become
+ * shared and will serve more than the single TID (but always for the same RA!).
+ *
+ * When a RA/TID needs to become aggregated, no new queue is required to be
+ * allocated, only mark the queue as aggregated via the ADD_STA command. Note,
+ * however, that a shared queue cannot be aggregated, and only after the other
+ * TIDs become inactive and are removed - only then can the queue be
+ * reconfigured and become aggregated.
+ *
+ * When removing a station, its queues are returned to the pool for reuse. Here
+ * we also need to make sure that we are synced with the worker thread that TXes
+ * the deferred frames so we don't get into a situation where the queues are
+ * removed and then the worker puts deferred frames onto the released queues or
+ * tries to allocate new queues for a STA we don't need anymore.
+ */
+
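The static part of that layout reads more easily as an enum. This is a hypothetical rendering of the table in the DOC text above; the driver's real values live in its fw-api header as %iwl_mvm_dqa_txq, and the names below are illustrative:

/* Static TXQ assignments in DQA mode, per the DOC text above. */
enum dqa_static_txq {
	DQA_CMD_QUEUE		= 0,	/* command queue */
	DQA_AUX_QUEUE		= 1,	/* aux frames */
	DQA_P2P_DEVICE_QUEUE	= 2,	/* P2P device frames */
	DQA_GCAST_QUEUE		= 3,	/* P2P GO/SoftAP GCAST/BCAST frames */
	DQA_BSS_CLIENT_QUEUE	= 4,	/* BSS DATA frames */
	DQA_MIN_MGMT_QUEUE	= 5,	/* start of MGMT/non-QoS pool */
	DQA_MAX_MGMT_QUEUE	= 8,	/* end of MGMT/non-QoS pool */
	DQA_AP_PROBE_RESP_QUEUE	= 9,	/* P2P GO/SoftAP probe responses */
	DQA_MIN_DATA_QUEUE	= 10,	/* start of DATA pool */
	DQA_MAX_DATA_QUEUE	= 31,	/* end of DATA pool */
};

Everything from #5 upward is handed out on demand: MGMT/non-QoS traffic draws from the 5-8 pool first, and per-RA/TID data queues come out of 10-31.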
 /**
  * DOC: station table - introduction
  *
@@ -253,6 +307,7 @@ enum iwl_mvm_agg_state {

 /**
  * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @deferred_tx_frames: deferred TX frames for this RA/TID
  * @seq_number: the next WiFi sequence number to use
  * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
  *	This is basically (last acked packet++).
@@ -260,7 +315,7 @@ enum iwl_mvm_agg_state {
  *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
  * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
  * @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session
+ * @txq_id: Tx queue used by the BA session / DQA
  * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
  *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
  *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@@ -268,6 +323,7 @@ enum iwl_mvm_agg_state {
  * @tx_time: medium time consumed by this A-MPDU
  */
 struct iwl_mvm_tid_data {
+	struct sk_buff_head deferred_tx_frames;
 	u16 seq_number;
 	u16 next_reclaimed;
 	/* The rest is Tx AGG related */
@@ -316,7 +372,10 @@ struct iwl_mvm_rxq_dup_data {
  *	we need to signal the EOSP
  * @lock: lock to protect the whole struct. Since %tid_data is accessed from Tx
  *	and from Tx response flow, it needs a spinlock.
- * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
+ * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
+ * @reserved_queue: the queue reserved for this STA for DQA purposes
+ *	Every STA is given one reserved queue to allow it to operate. If no
+ *	such queue can be guaranteed, the STA addition will fail.
  * @tx_protection: reference counter for controlling the Tx protection.
  * @tt_tx_protection: is thermal throttling enable Tx protection?
  * @disable_tx: is tx to this STA disabled?
@@ -329,6 +388,7 @@ struct iwl_mvm_rxq_dup_data {
  *	the BA window. To be used for UAPSD only.
  * @ptk_pn: per-queue PTK PN data structures
  * @dup_data: per queue duplicate packet detection data
+ * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
  *
  * When mac80211 creates a station it reserves some space (hw->sta_data_size)
  * in the structure for use by driver. This structure is placed in that
@@ -345,12 +405,16 @@ struct iwl_mvm_sta {
 	bool bt_reduced_txpower;
 	bool next_status_eosp;
 	spinlock_t lock;
-	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
+	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
 	struct iwl_lq_sta lq_sta;
 	struct ieee80211_vif *vif;
 	struct iwl_mvm_key_pn __rcu *ptk_pn[4];
 	struct iwl_mvm_rxq_dup_data *dup_data;

+	u16 deferred_traffic_tid_map;
+
+	u8 reserved_queue;
+
 	/* Temporary, until the new TLC will control the Tx protection */
 	s8 tx_protection;
 	bool tt_tx_protection;
@@ -378,8 +442,18 @@ struct iwl_mvm_int_sta {
 	u32 tfd_queue_msk;
 };

+/**
+ * Send the STA info to the FW.
+ *
+ * @mvm: the iwl_mvm* to use
+ * @sta: the STA
+ * @update: this is true if the FW is being updated about a STA it already knows
+ *	about. Otherwise (if this is a new STA), this should be false.
+ * @flags: if update==true, this marks what is being changed via ORs of values
+ *	from enum iwl_sta_modify_flag. Otherwise, this is ignored.
+ */
 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			   bool update);
+			   bool update, unsigned int flags);
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		    struct ieee80211_vif *vif,
 		    struct ieee80211_sta *sta);
@@ -459,5 +533,6 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
 				       struct iwl_mvm_vif *mvmvif,
 				       bool disable);
 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);

 #endif /* __sta_h__ */
@@ -204,20 +204,11 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 	if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
 		return;

-	/*
-	 * We are now handling a temperature notification from the firmware
-	 * in ASYNC and hold the mutex. thermal_notify_framework will call
-	 * us back through get_temp() which ought to send a SYNC command to
-	 * the firmware and hence to take the mutex.
-	 * Avoid the deadlock by unlocking the mutex here.
-	 */
 	if (mvm->tz_device.tzone) {
 		struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;

-		mutex_unlock(&mvm->mutex);
 		thermal_notify_framework(tz_dev->tzone,
 					 tz_dev->fw_trips_index[ths_crossed]);
-		mutex_lock(&mvm->mutex);
 	}
 #endif /* CONFIG_THERMAL */
 }
@@ -796,9 +787,6 @@ static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
 {
 	struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);

-	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
-		return -EBUSY;
-
 	*state = mvm->cooling_dev.cur_state;

 	return 0;
@@ -813,9 +801,6 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
 	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
 		return -EIO;

-	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
-		return -EBUSY;
-
 	mutex_lock(&mvm->mutex);
 	if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
...
@@ -67,6 +67,7 @@
 #include <linux/etherdevice.h>
 #include <linux/tcp.h>
 #include <net/ip.h>
+#include <net/ipv6.h>

 #include "iwl-trans.h"
 #include "iwl-eeprom-parse.h"
@@ -98,6 +99,111 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
 		addr, tid, ssn);
 }

+#define OPT_HDR(type, skb, off) \
+	(type *)(skb_network_header(skb) + (off))
+
+static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+			    struct ieee80211_hdr *hdr,
+			    struct ieee80211_tx_info *info,
+			    struct iwl_tx_cmd *tx_cmd)
+{
+#if IS_ENABLED(CONFIG_INET)
+	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+	u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
+	u8 protocol = 0;
+
+	/*
+	 * Do not compute checksum if already computed or if transport will
+	 * compute it
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
+		return;
+
+	/* We do not expect to be requested to csum stuff we do not support */
+	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
+		      (skb->protocol != htons(ETH_P_IP) &&
+		       skb->protocol != htons(ETH_P_IPV6)),
+		      "No support for requested checksum\n")) {
+		skb_checksum_help(skb);
+		return;
+	}
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		protocol = ip_hdr(skb)->protocol;
+	} else {
+#if IS_ENABLED(CONFIG_IPV6)
+		struct ipv6hdr *ipv6h =
+			(struct ipv6hdr *)skb_network_header(skb);
+		unsigned int off = sizeof(*ipv6h);
+
+		protocol = ipv6h->nexthdr;
+		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+			/* only supported extension headers */
+			if (protocol != NEXTHDR_ROUTING &&
+			    protocol != NEXTHDR_HOP &&
+			    protocol != NEXTHDR_DEST &&
+			    protocol != NEXTHDR_FRAGMENT) {
+				skb_checksum_help(skb);
+				return;
+			}
+
+			if (protocol == NEXTHDR_FRAGMENT) {
+				struct frag_hdr *hp =
+					OPT_HDR(struct frag_hdr, skb, off);
+
+				protocol = hp->nexthdr;
+				off += sizeof(struct frag_hdr);
+			} else {
+				struct ipv6_opt_hdr *hp =
+					OPT_HDR(struct ipv6_opt_hdr, skb, off);
+
+				protocol = hp->nexthdr;
+				off += ipv6_optlen(hp);
+			}
+		}
+		/* if we get here - protocol now should be TCP/UDP */
+#endif
+	}
+
+	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+		WARN_ON_ONCE(1);
+		skb_checksum_help(skb);
+		return;
+	}
+
+	/* enable L4 csum */
+	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+	/*
+	 * Set offset to IP header (snap).
+	 * We don't support tunneling so no need to take care of inner header.
+	 * Size is in words.
+	 */
+	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
+	if (skb->protocol == htons(ETH_P_IP) &&
+	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
+		ip_hdr(skb)->check = 0;
+		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+	}
+
+	/* reset UDP/TCP header csum */
+	if (protocol == IPPROTO_TCP)
+		tcp_hdr(skb)->check = 0;
+	else
+		udp_hdr(skb)->check = 0;
+
+	/* mac header len should include IV, size is in words */
+	if (info->control.hw_key)
+		mh_len += info->control.hw_key->iv_len;
+	mh_len /= 2;
+	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+	tx_cmd->offload_assist = cpu_to_le16(offload_assist);
+#endif
+}
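The offload_assist word that iwl_mvm_tx_csum() builds is a plain bitfield handed to the firmware. A simplified sketch of the packing for an unencrypted, non-A-MSDU IPv4/TCP frame follows; the bit positions below are made up for illustration, and the real ones come from the TX command API definitions:

#include <stdint.h>

/* Illustrative bit positions; the real ones live in the TX cmd API header. */
#define OFFLD_L4_EN	6	/* enable L4 (TCP/UDP) checksum */
#define OFFLD_L3_EN	5	/* enable L3 (IPv4) checksum, A-MSDU only */
#define OFFLD_IP_HDR	8	/* offset to the IP header (snap), in words */
#define OFFLD_MH_SIZE	0	/* MAC header length, in 16-bit words */

static uint16_t build_offload_assist(uint16_t mac_hdr_len, uint16_t iv_len)
{
	uint16_t oa = 0;

	oa |= 1 << OFFLD_L4_EN;		/* always checksum L4 here */
	oa |= 4 << OFFLD_IP_HDR;	/* SNAP header: 8 bytes = 4 words */

	mac_hdr_len += iv_len;		/* MAC header must include the IV */
	oa |= (mac_hdr_len / 2) << OFFLD_MH_SIZE;

	return oa;
}

The /2 mirrors the real function's mh_len /= 2: the firmware expects the MAC-header length in 16-bit words, not bytes.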
 /*
  * Sets most of the Tx cmd's fields
  */
@@ -126,6 +232,9 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 		u8 *qc = ieee80211_get_qos_ctl(hdr);
 		tx_cmd->tid_tspec = qc[0] & 0xf;
 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+			tx_cmd->offload_assist |=
+				cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
 	} else if (ieee80211_is_back_req(fc)) {
 		struct ieee80211_bar *bar = (void *)skb->data;
 		u16 control = le16_to_cpu(bar->control);
@@ -186,9 +295,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 	/* Total # bytes to be transmitted */
 	tx_cmd->len = cpu_to_le16((u16)skb->len +
 		(uintptr_t)info->driver_data[0]);
-	tx_cmd->next_frame_len = 0;
 	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
 	tx_cmd->sta_id = sta_id;
+
+	/* padding is inserted later in transport */
+	if (ieee80211_hdrlen(fc) % 4 &&
+	    !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
+		tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
+
+	iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
 }

 /*
@@ -459,6 +574,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
 	u16 amsdu_add, snap_ip_tcp, pad, i = 0;
 	unsigned int dbg_max_amsdu_len;
+	netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
 	u8 *qc, tid, txf;

 	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
@@ -477,6 +593,19 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 		goto segment;
 	}

+	/*
+	 * Do not build AMSDU for IPv6 with extension headers.
+	 * Ask the stack to segment and checksum the generated MPDUs for us.
+	 */
+	if (skb->protocol == htons(ETH_P_IPV6) &&
+	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+	    IPPROTO_TCP) {
+		num_subframes = 1;
+		pad = 0;
+		netdev_features &= ~NETIF_F_CSUM_MASK;
+		goto segment;
+	}
+
 	/*
 	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
 	 * during an BA session.
@@ -570,7 +699,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 	skb_shinfo(skb)->gso_size = num_subframes * mss;
 	memcpy(cb, skb->cb, sizeof(cb));

-	next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG);
+	next = skb_gso_segment(skb, netdev_features);
 	skb_shinfo(skb)->gso_size = mss;
 	if (WARN_ON_ONCE(IS_ERR(next)))
 		return -EINVAL;
@@ -632,6 +761,35 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 }
 #endif

+static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
+				  struct iwl_mvm_sta *mvm_sta, u8 tid,
+				  struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	u8 mac_queue = info->hw_queue;
+	struct sk_buff_head *deferred_tx_frames;
+
+	lockdep_assert_held(&mvm_sta->lock);
+
+	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
+	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
+
+	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
+
+	skb_queue_tail(deferred_tx_frames, skb);
+
+	/*
+	 * The first deferred frame should've stopped the MAC queues, so we
+	 * should never get a second deferred frame for the RA/TID.
+	 */
+	if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
+		  "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
+		  skb_queue_len(deferred_tx_frames))) {
+		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
+		schedule_work(&mvm->add_stream_wk);
+	}
+}
+
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
@@ -647,7 +805,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	u16 seq_number = 0;
 	u8 tid = IWL_MAX_TID_COUNT;
 	u8 txq_id = info->hw_queue;
-	bool is_data_qos = false, is_ampdu = false;
+	bool is_ampdu = false;
 	int hdrlen;

 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -687,8 +845,15 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 		seq_number &= IEEE80211_SCTL_SEQ;
 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 		hdr->seq_ctrl |= cpu_to_le16(seq_number);
-		is_data_qos = true;
 		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+	} else if (iwl_mvm_is_dqa_supported(mvm) &&
+		   (ieee80211_is_qos_nullfunc(fc) ||
+		    ieee80211_is_nullfunc(fc))) {
+		/*
+		 * nullfunc frames should go to the MGMT queue regardless of QOS
+		 */
+		tid = IWL_MAX_TID_COUNT;
+		txq_id = mvmsta->tid_data[tid].txq_id;
 	}

 	/* Copy MAC header from skb into command buffer */
@@ -709,13 +874,30 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 		txq_id = mvmsta->tid_data[tid].txq_id;
 	}

+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		if (unlikely(mvmsta->tid_data[tid].txq_id ==
+			     IEEE80211_INVAL_HW_QUEUE)) {
+			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+
+			/*
+			 * The frame is now deferred, and the worker scheduled
+			 * will re-allocate it, so we can free it for now.
+			 */
+			iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+			spin_unlock(&mvmsta->lock);
+			return 0;
+		}
+
+		txq_id = mvmsta->tid_data[tid].txq_id;
+	}
+
 	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
 		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));

 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
 		goto drop_unlock_sta;

-	if (is_data_qos && !ieee80211_has_morefrags(fc))
+	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
 		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;

 	spin_unlock(&mvmsta->lock);
...
@@ -491,98 +491,12 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
 	IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
 }

-static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
-{
-	struct iwl_trans *trans = mvm->trans;
-	struct iwl_error_event_table_v1 table;
-	u32 base;
-
-	base = mvm->error_event_table;
-	if (mvm->cur_ucode == IWL_UCODE_INIT) {
-		if (!base)
-			base = mvm->fw->init_errlog_ptr;
-	} else {
-		if (!base)
-			base = mvm->fw->inst_errlog_ptr;
-	}
-
-	if (base < 0x800000) {
-		IWL_ERR(mvm,
-			"Not valid error log pointer 0x%08X for %s uCode\n",
-			base,
-			(mvm->cur_ucode == IWL_UCODE_INIT)
-			? "Init" : "RT");
-		return;
-	}
-
-	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
-
-	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
-		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
-		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
-			mvm->status, table.valid);
-	}
-
-	/* Do not change this output - scripts rely on it */
-	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
-
-	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
-				      table.data1, table.data2, table.data3,
-				      table.blink2, table.ilink1, table.ilink2,
-				      table.bcon_time, table.gp1, table.gp2,
-				      table.gp3, table.ucode_ver, 0,
-				      table.hw_ver, table.brd_ver);
-	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
-		desc_lookup(table.error_id));
-	IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
-	IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
-	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
-	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
-	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
-	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
-	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
-	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
-	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
-	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
-	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
-	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
-	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
-	IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
-	IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
-	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
-	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
-	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
-	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
-	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
-	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
-	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
-	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
-	IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
-	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
-	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
-	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
-	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
-	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
-	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
-	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
-	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
-
-	if (mvm->support_umac_log)
-		iwl_mvm_dump_umac_error_log(mvm);
-}
-
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 {
 	struct iwl_trans *trans = mvm->trans;
 	struct iwl_error_event_table table;
 	u32 base;

-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
-		iwl_mvm_dump_nic_error_log_old(mvm);
-		return;
-	}
-
 	base = mvm->error_event_table;
 	if (mvm->cur_ucode == IWL_UCODE_INIT) {
 		if (!base)
@@ -694,6 +608,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	mvm->queue_info[queue].hw_queue_refcount++;
 	if (mvm->queue_info[queue].hw_queue_refcount > 1)
 		enable_queue = false;
+	else
+		mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
 	mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);

 	IWL_DEBUG_TX_QUEUES(mvm,
@@ -779,6 +695,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 		return;
 	}

+	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+
 	/* Make sure queue info is correct even though we overwrite it */
 	WARN(mvm->queue_info[queue].hw_queue_refcount ||
 	     mvm->queue_info[queue].tid_bitmap ||
@@ -1079,3 +997,74 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 out:
 	ieee80211_connection_loss(vif);
 }
+
+int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
+			 enum iwl_lqm_cmd_operatrions operation,
+			 u32 duration, u32 timeout)
+{
+	struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_link_qual_msrmnt_cmd cmd = {
+		.cmd_operation = cpu_to_le32(operation),
+		.mac_id = cpu_to_le32(mvm_vif->id),
+		.measurement_time = cpu_to_le32(duration),
+		.timeout = cpu_to_le32(timeout),
+	};
+	u32 cmdid =
+		iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0);
+	int ret;
+
+	if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_LQM_SUPPORT))
+		return -EOPNOTSUPP;
+
+	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+		return -EINVAL;
+
+	switch (operation) {
+	case LQM_CMD_OPERATION_START_MEASUREMENT:
+		if (iwl_mvm_lqm_active(mvm_vif->mvm))
+			return -EBUSY;
+		if (!vif->bss_conf.assoc)
+			return -EINVAL;
+		mvm_vif->lqm_active = true;
+		break;
+	case LQM_CMD_OPERATION_STOP_MEASUREMENT:
+		if (!iwl_mvm_lqm_active(mvm_vif->mvm))
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd),
+				   &cmd);
+
+	/* command failed - roll back lqm_active state */
+	if (ret) {
+		mvm_vif->lqm_active =
+			operation == LQM_CMD_OPERATION_STOP_MEASUREMENT;
+	}
+
+	return ret;
+}
+
+static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac,
+					struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+	bool *lqm_active = _data;
+
+	*lqm_active = *lqm_active || mvm_vif->lqm_active;
+}
+
+bool iwl_mvm_lqm_active(struct iwl_mvm *mvm)
+{
+	bool ret = false;
+
+	lockdep_assert_held(&mvm->mutex);
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_lqm_active_iterator, &ret);
+
+	return ret;
+}
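The exported entry point above behaves as a small state machine: starting is only legal on an associated, non-P2P station vif while no measurement runs on any interface, and stopping is only legal while one does. A hedged usage sketch; the caller is assumed to hold mvm->mutex (iwl_mvm_lqm_active() asserts it), and the duration/timeout values are illustrative raw u32s whose units the firmware defines:

/* Start one link-quality measurement, then stop it; both calls are
 * routed through the MAC_CONF_GROUP command added in this series.
 */
static int lqm_measure_once(struct ieee80211_vif *vif)
{
	int ret;

	ret = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT,
				   500, 2000);
	if (ret)	/* e.g. -EBUSY: a measurement is already active */
		return ret;

	/* ... wait for LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF ... */

	return iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
				    0, 0);
}

Note the rollback on command failure in the real code: lqm_active is restored so that a failed start does not leave the driver believing a measurement is running.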
@@ -483,17 +483,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},

/* 9000 Series */
+	{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0010, iwl5165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9560_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */

 	{0}
@@ -651,10 +653,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* The PCI device starts with a reference taken and we are
 	 * supposed to release it here. But to simplify the
 	 * interaction with the opmode, we don't do it now, but let
-	 * the opmode release it when it's ready. To account for this
-	 * reference, we start with ref_count set to 1.
+	 * the opmode release it when it's ready.
 	 */
-	trans_pcie->ref_count = 1;

 	return 0;
...
@@ -348,7 +348,7 @@ struct iwl_tso_hdr_page {
 struct iwl_trans_pcie {
 	struct iwl_rxq *rxq;
 	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
-	struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
+	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
 	struct iwl_rb_allocator rba;
 	struct iwl_trans *trans;
 	struct iwl_drv *drv;
@@ -403,10 +403,6 @@ struct iwl_trans_pcie {
 	bool cmd_hold_nic_awake;
 	bool ref_cmd_in_flight;

-	/* protect ref counter */
-	spinlock_t ref_lock;
-	u32 ref_count;
-
 	dma_addr_t fw_mon_phys;
 	struct page *fw_mon_page;
 	u32 fw_mon_size;
...
...@@ -210,8 +210,12 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
        if (trans->cfg->mq_rx_supported)
                iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
                               rxq->write_actual);
-       else
-               iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+       /*
+        * write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A to
+        * hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will
+        * not wake the NIC.
+        */
+       iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
...@@ -908,6 +912,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        allocator_pool_size = trans->num_rx_queues *
                (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
        num_alloc = queue_size + allocator_pool_size;
+       BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
+                    ARRAY_SIZE(trans_pcie->rx_pool));

        for (i = 0; i < num_alloc; i++) {
                struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
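
The BUILD_BUG_ON() added above pins the two arrays to the same length at compile time, since both are indexed by the same buffer id. A minimal sketch of the pattern with a hypothetical my_ctx structure (not part of this driver), assuming only the stock <linux/bug.h> and <linux/kernel.h> macros:

#include <linux/bug.h>
#include <linux/kernel.h>

#define MY_POOL_SIZE 512

struct my_ctx {
        void *pool[MY_POOL_SIZE];       /* storage, indexed by buffer id */
        void *table[MY_POOL_SIZE];      /* lookup table, same id space */
};

static void my_ctx_check(struct my_ctx *ctx)
{
        /* refuses to compile if one array is resized without the other */
        BUILD_BUG_ON(ARRAY_SIZE(ctx->pool) != ARRAY_SIZE(ctx->table));
}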
...@@ -1805,7 +1811,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
        struct msix_entry *entry = dev_id;
        struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
        struct iwl_trans *trans = trans_pcie->trans;
-       struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats;
+       struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta_fh, inta_hw;

        lock_map_acquire(&trans->sync_cmd_lockdep_map);
...
...@@ -1321,6 +1321,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
         * after this call.
         */
        iwl_pcie_reset_ict(trans);
+       iwl_enable_interrupts(trans);

        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
...@@ -1434,7 +1435,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
        int ret, i;

        if (trans->cfg->mq_rx_supported) {
-               max_vector = min_t(u32, (num_possible_cpus() + 1),
+               max_vector = min_t(u32, (num_possible_cpus() + 2),
                                   IWL_MAX_RX_HW_QUEUES);
                for (i = 0; i < max_vector; i++)
                        trans_pcie->msix_entries[i].entry = i;
...@@ -1465,7 +1466,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
        ret = pci_enable_msi(pdev);
        if (ret) {
-               dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
+               dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
                /* enable rfkill interrupt: hw bug w/a */
                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
                if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
...@@ -1499,8 +1500,8 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
                        IWL_ERR(trans_pcie->trans,
                                "Error allocating IRQ %d\n", i);
                        for (j = 0; j < i; j++)
-                               free_irq(trans_pcie->msix_entries[i].vector,
-                                        &trans_pcie->msix_entries[i]);
+                               free_irq(trans_pcie->msix_entries[j].vector,
+                                        &trans_pcie->msix_entries[j]);
                        pci_disable_msix(pdev);
                        return ret;
                }
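
The free_irq() change above is the classic partial-failure unwind: when request number i fails, only the vectors that were actually acquired (indices 0..i-1, loop variable j) may be released, never entry i itself. A minimal sketch of the pattern with hypothetical names, assuming the stock <linux/interrupt.h> API:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_irq_handler(int irq, void *ctx)
{
        return IRQ_HANDLED;
}

static int my_request_msix_irqs(struct msix_entry *entries, int n, void *ctx)
{
        int i, j, ret;

        for (i = 0; i < n; i++) {
                ret = request_irq(entries[i].vector, my_irq_handler, 0,
                                  "my-drv", ctx);
                if (ret) {
                        /* unwind with j, not i: entry i was never acquired */
                        for (j = 0; j < i; j++)
                                free_irq(entries[j].vector, ctx);
                        return ret;
                }
        }
        return 0;
}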
...@@ -1694,6 +1695,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        }

        free_percpu(trans_pcie->tso_hdr_page);
+       mutex_destroy(&trans_pcie->mutex);

        iwl_trans_free(trans);
}
...@@ -2014,38 +2016,32 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       unsigned long flags;

        if (iwlwifi_mod_params.d0i3_disable)
                return;

-       spin_lock_irqsave(&trans_pcie->ref_lock, flags);
-       IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
-       trans_pcie->ref_count++;
        pm_runtime_get(&trans_pcie->pci_dev->dev);
-       spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+
+#ifdef CONFIG_PM
+       IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+                     atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
}

void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       unsigned long flags;

        if (iwlwifi_mod_params.d0i3_disable)
                return;

-       spin_lock_irqsave(&trans_pcie->ref_lock, flags);
-       IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
-       if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
-               spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
-               return;
-       }
-
-       trans_pcie->ref_count--;
        pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
        pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
-       spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+
+#ifdef CONFIG_PM
+       IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+                     atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
}

static const char *get_csr_string(int cmd)
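
With the private ref_lock/ref_count pair gone, the reference bookkeeping above is simply the runtime-PM core's own usage counter. A minimal sketch of that idiom for a hypothetical driver, assuming the standard <linux/pm_runtime.h> API:

#include <linux/pm_runtime.h>

/* hold the device active while work is outstanding */
static void my_drv_ref(struct device *dev)
{
        pm_runtime_get(dev);            /* usage_count++, async resume */
}

/* drop the reference and arm the autosuspend timer */
static void my_drv_unref(struct device *dev)
{
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}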
...@@ -2793,7 +2789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        trans_pcie->trans = trans;
        spin_lock_init(&trans_pcie->irq_lock);
        spin_lock_init(&trans_pcie->reg_lock);
-       spin_lock_init(&trans_pcie->ref_lock);
        mutex_init(&trans_pcie->mutex);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
        trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
...
...@@ -596,6 +596,28 @@ static void iwl_pcie_free_tso_page(struct sk_buff *skb)
        }
}

+static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       lockdep_assert_held(&trans_pcie->reg_lock);
+
+       if (trans_pcie->ref_cmd_in_flight) {
+               trans_pcie->ref_cmd_in_flight = false;
+               IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
+               iwl_trans_pcie_unref(trans);
+       }
+
+       if (!trans->cfg->base_params->apmg_wake_up_wa)
+               return;
+
+       if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+               return;
+
+       trans_pcie->cmd_hold_nic_awake = false;
+       __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+                                  CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
...@@ -620,6 +642,20 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
                }
                iwl_pcie_txq_free_tfd(trans, txq);
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+
+               if (q->read_ptr == q->write_ptr) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+                       if (txq_id != trans_pcie->cmd_queue) {
+                               IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
+                                             q->id);
+                               iwl_trans_pcie_unref(trans);
+                       } else {
+                               iwl_pcie_clear_cmd_in_flight(trans);
+                       }
+                       spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+               }
        }
        txq->active = false;
...@@ -1148,29 +1184,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
        return 0;
}

-static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-       lockdep_assert_held(&trans_pcie->reg_lock);
-
-       if (trans_pcie->ref_cmd_in_flight) {
-               trans_pcie->ref_cmd_in_flight = false;
-               IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
-               iwl_trans_pcie_unref(trans);
-       }
-
-       if (trans->cfg->base_params->apmg_wake_up_wa) {
-               if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
-                       return 0;
-
-               trans_pcie->cmd_hold_nic_awake = false;
-               __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
-                                          CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-       }
-
-       return 0;
-}
-
/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
...@@ -2197,6 +2210,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        __le16 fc;
        u8 hdr_len;
        u16 wifi_seq;
+       bool amsdu;

        txq = &trans_pcie->txq[txq_id];
        q = &txq->q;
...@@ -2288,11 +2302,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
         */
        len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
              hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
-       tb1_len = ALIGN(len, 4);
-
-       /* Tell NIC about any 2-byte padding after MAC header */
-       if (tb1_len != len)
-               tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+       /* do not align A-MSDU to dword as the subframe header aligns it */
+       amsdu = ieee80211_is_data_qos(fc) &&
+               (*ieee80211_get_qos_ctl(hdr) &
+                IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+       if (trans_pcie->sw_csum_tx || !amsdu) {
+               tb1_len = ALIGN(len, 4);
+               /* Tell NIC about any 2-byte padding after MAC header */
+               if (tb1_len != len)
+                       tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+       } else {
+               tb1_len = len;
+       }

        /* The first TB points to the scratchbuf data - min_copy bytes */
        memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
...@@ -2310,8 +2331,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                goto out_err;
        iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

-       if (ieee80211_is_data_qos(fc) &&
-           (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT)) {
+       if (amsdu) {
                if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
                                                     out_meta, dev_cmd,
                                                     tb1_len)))
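
The amsdu flag introduced above is built from stock mac80211 helpers. A stand-alone sketch of the same test, assuming only <linux/ieee80211.h>:

#include <linux/ieee80211.h>

/* true for a QoS-data frame whose QoS control field flags an A-MSDU */
static bool frame_is_amsdu(struct ieee80211_hdr *hdr)
{
        return ieee80211_is_data_qos(hdr->frame_control) &&
               (*ieee80211_get_qos_ctl(hdr) &
                IEEE80211_QOS_CTL_A_MSDU_PRESENT);
}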
...