Commit fafa7424 authored by Kalle Valo


Merge tag 'iwlwifi-next-for-kalle-2020-01-11' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

First set of patches intended for v5.6

* Support new versions of the FTM FW APIs;
* Fix an old bug in D3 (WoWLAN);
* A couple of fixes/improvements in the receive-buffers code;
* Fix in the debugging where we were skipping one TXQ;
* Support new version of the beacon template FW API;
* Print some extra information when the driver is loaded;
* Some debugging infrastructure (aka. yoyo) updates;
* Support for a new HW version;
* Second phase of device configuration work started;
* Some clean-ups;
parents e07c5f2e 0b295a1e
@@ -77,8 +77,7 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
     .trans.base_params = &iwl1000_base_params, \
     .eeprom_params = &iwl1000_eeprom_params, \
     .led_mode = IWL_LED_BLINK, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl1000_bgn_cfg = {
     .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
@@ -104,8 +103,7 @@ const struct iwl_cfg iwl1000_bg_cfg = {
     .eeprom_params = &iwl1000_eeprom_params, \
     .led_mode = IWL_LED_RF_STATE, \
     .rx_with_siso_diversity = true, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl100_bgn_cfg = {
     .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
...
@@ -103,8 +103,7 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
     .trans.base_params = &iwl2000_base_params, \
     .eeprom_params = &iwl20x0_eeprom_params, \
     .led_mode = IWL_LED_RF_STATE, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl2000_2bgn_cfg = {
@@ -131,8 +130,7 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
     .trans.base_params = &iwl2030_base_params, \
     .eeprom_params = &iwl20x0_eeprom_params, \
     .led_mode = IWL_LED_RF_STATE, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl2030_2bgn_cfg = {
     .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -153,8 +151,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
     .eeprom_params = &iwl20x0_eeprom_params, \
     .led_mode = IWL_LED_RF_STATE, \
     .rx_with_siso_diversity = true, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl105_bgn_cfg = {
     .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -181,8 +178,7 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
     .eeprom_params = &iwl20x0_eeprom_params, \
     .led_mode = IWL_LED_RF_STATE, \
     .rx_with_siso_diversity = true, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl135_bgn_cfg = {
     .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
...
@@ -75,8 +75,7 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
     .trans.base_params = &iwl5000_base_params, \
     .eeprom_params = &iwl5000_eeprom_params, \
     .led_mode = IWL_LED_BLINK, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl5300_agn_cfg = {
     .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
@@ -125,7 +124,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
     .ht_params = &iwl5000_ht_params,
     .led_mode = IWL_LED_BLINK,
     .internal_wimax_coex = true,
-    .trans.csr = &iwl_csr_v1,
 };

 #define IWL_DEVICE_5150 \
@@ -141,8 +139,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
     .eeprom_params = &iwl5000_eeprom_params, \
     .led_mode = IWL_LED_BLINK, \
     .internal_wimax_coex = true, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl5150_agn_cfg = {
     .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
...
@@ -124,8 +124,7 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
     .trans.base_params = &iwl6000_g2_base_params, \
     .eeprom_params = &iwl6000_eeprom_params, \
     .led_mode = IWL_LED_RF_STATE, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl6005_2agn_cfg = {
     .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
@@ -179,8 +178,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
     .trans.base_params = &iwl6000_g2_base_params, \
     .eeprom_params = &iwl6000_eeprom_params, \
     .led_mode = IWL_LED_RF_STATE, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl6030_2agn_cfg = {
     .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -216,8 +214,7 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
     .trans.base_params = &iwl6000_g2_base_params, \
     .eeprom_params = &iwl6000_eeprom_params, \
     .led_mode = IWL_LED_RF_STATE, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl6035_2agn_cfg = {
     .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
@@ -272,8 +269,7 @@ const struct iwl_cfg iwl130_bg_cfg = {
     .trans.base_params = &iwl6000_base_params, \
     .eeprom_params = &iwl6000_eeprom_params, \
     .led_mode = IWL_LED_BLINK, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl6000i_2agn_cfg = {
     .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
@@ -306,8 +302,7 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
     .eeprom_params = &iwl6000_eeprom_params, \
     .led_mode = IWL_LED_BLINK, \
     .internal_wimax_coex = true, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl6050_2agn_cfg = {
     .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
@@ -333,8 +328,7 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
     .eeprom_params = &iwl6000_eeprom_params, \
     .led_mode = IWL_LED_BLINK, \
     .internal_wimax_coex = true, \
-    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .trans.csr = &iwl_csr_v1
+    .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

 const struct iwl_cfg iwl6150_bgn_cfg = {
     .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
@@ -361,7 +355,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
     .eeprom_params = &iwl6000_eeprom_params,
     .ht_params = &iwl6000_ht_params,
     .led_mode = IWL_LED_BLINK,
-    .trans.csr = &iwl_csr_v1,
 };

 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
...
@@ -154,8 +154,7 @@ static const struct iwl_ht_params iwl7000_ht_params = {
     .nvm_hw_section_num = 0, \
     .non_shared_ant = ANT_A, \
     .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
-    .dccm_offset = IWL7000_DCCM_OFFSET, \
-    .trans.csr = &iwl_csr_v1
+    .dccm_offset = IWL7000_DCCM_OFFSET

 #define IWL_DEVICE_7000 \
     IWL_DEVICE_7000_COMMON, \
...
@@ -151,8 +151,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
     .apmg_not_supported = true, \
     .nvm_type = IWL_NVM_EXT, \
     .dbgc_supported = true, \
-    .min_umac_error_event_table = 0x800000, \
-    .trans.csr = &iwl_csr_v1
+    .min_umac_error_event_table = 0x800000

 #define IWL_DEVICE_8000 \
     IWL_DEVICE_8000_COMMON, \
...
@@ -138,13 +138,13 @@ static const struct iwl_tt_params iwl9000_tt_params = {
     .thermal_params = &iwl9000_tt_params, \
     .apmg_not_supported = true, \
     .trans.mq_rx_supported = true, \
+    .num_rbds = 512, \
     .vht_mu_mimo_supported = true, \
     .mac_addr_from_csr = true, \
     .trans.rf_id = true, \
     .nvm_type = IWL_NVM_EXT, \
     .dbgc_supported = true, \
     .min_umac_error_event_table = 0x800000, \
-    .trans.csr = &iwl_csr_v1, \
     .d3_debug_data_base_addr = 0x401000, \
     .d3_debug_data_length = 92 * 1024, \
     .ht_params = &iwl9000_ht_params, \
@@ -171,6 +171,12 @@ static const struct iwl_tt_params iwl9000_tt_params = {
     }, \
     }

+const struct iwl_cfg_trans_params iwl9000_trans_cfg = {
+    .device_family = IWL_DEVICE_FAMILY_9000,
+    .base_params = &iwl9000_base_params,
+    .mq_rx_supported = true,
+    .rf_id = true,
+};

 const struct iwl_cfg iwl9160_2ac_cfg = {
     .name = "Intel(R) Dual Band Wireless AC 9160",
@@ -184,8 +190,10 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
     IWL_DEVICE_9000,
 };

+const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz";
+const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";

 const struct iwl_cfg iwl9260_2ac_160_cfg = {
-    .name = "Intel(R) Wireless-AC 9260 160MHz",
     .fw_name_pre = IWL9260_FW_PRE,
     IWL_DEVICE_9000,
 };
...
@@ -1255,7 +1255,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
      ************************/
     hw = iwl_alloc_all();
     if (!hw) {
-        pr_err("%s: Cannot allocate network device\n", cfg->name);
+        pr_err("%s: Cannot allocate network device\n", trans->name);
         goto out;
     }
@@ -1390,7 +1390,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
      * 2. Read REV register
      ***********************/
     IWL_INFO(priv, "Detected %s, REV=0x%X\n",
-             priv->cfg->name, priv->trans->hw_rev);
+             priv->trans->name, priv->trans->hw_rev);

     if (iwl_trans_start_hw(priv->trans))
         goto out_free_hw;
...
@@ -240,7 +240,7 @@ enum iwl_tof_responder_cfg_flags {
 };

 /**
- * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * struct iwl_tof_responder_config_cmd_v6 - ToF AP mode (for debug)
  * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
  * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
  * @bandwidth: current AP Bandwidth: &enum iwl_tof_bandwidth
@@ -258,7 +258,7 @@ enum iwl_tof_responder_cfg_flags {
  * @bssid: Current AP BSSID
  * @reserved2: reserved
  */
-struct iwl_tof_responder_config_cmd {
+struct iwl_tof_responder_config_cmd_v6 {
     __le32 cmd_valid_fields;
     __le32 responder_cfg_flags;
     u8 bandwidth;
@@ -274,6 +274,42 @@ struct iwl_tof_responder_config_cmd {
     __le16 reserved2;
 } __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */

+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ *             bits 4 - 7: &enum iwl_location_bw.
+ * @rate: current AP rate
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ *     the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @reserved1: reserved
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ *     purposes, simulating station movement by adding various values
+ *     to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @reserved2: reserved
+ */
+struct iwl_tof_responder_config_cmd {
+    __le32 cmd_valid_fields;
+    __le32 responder_cfg_flags;
+    u8 format_bw;
+    u8 rate;
+    u8 channel_num;
+    u8 ctrl_ch_position;
+    u8 sta_id;
+    u8 reserved1;
+    __le16 toa_offset;
+    __le16 common_calib;
+    __le16 specific_calib;
+    u8 bssid[ETH_ALEN];
+    __le16 reserved2;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */

 #define IWL_LCI_CIVIC_IE_MAX_SIZE 400

 /**
@@ -403,7 +439,7 @@ enum iwl_initiator_ap_flags {
 };

 /**
- * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * struct iwl_tof_range_req_ap_entry_v3 - AP configuration parameters
  * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
  * @channel_num: AP Channel number
  * @bandwidth: AP bandwidth. One of iwl_tof_bandwidth.
@@ -420,7 +456,7 @@ enum iwl_initiator_ap_flags {
  * @reserved: For alignment and future use
  * @tsf_delta: not in use
  */
-struct iwl_tof_range_req_ap_entry {
+struct iwl_tof_range_req_ap_entry_v3 {
     __le32 initiator_ap_flags;
     u8 channel_num;
     u8 bandwidth;
@@ -434,6 +470,72 @@ struct iwl_tof_range_req_ap_entry {
     __le32 tsf_delta;
 } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_3 */

+/**
+ * enum iwl_location_frame_format - location frame formats
+ * @IWL_LOCATION_FRAME_FORMAT_LEGACY: legacy
+ * @IWL_LOCATION_FRAME_FORMAT_HT: HT
+ * @IWL_LOCATION_FRAME_FORMAT_VHT: VHT
+ * @IWL_LOCATION_FRAME_FORMAT_HE: HE
+ */
+enum iwl_location_frame_format {
+    IWL_LOCATION_FRAME_FORMAT_LEGACY,
+    IWL_LOCATION_FRAME_FORMAT_HT,
+    IWL_LOCATION_FRAME_FORMAT_VHT,
+    IWL_LOCATION_FRAME_FORMAT_HE,
+};
+
+/**
+ * enum iwl_location_bw - location bandwidth selection
+ * @IWL_LOCATION_BW_20MHZ: 20MHz
+ * @IWL_LOCATION_BW_40MHZ: 40MHz
+ * @IWL_LOCATION_BW_80MHZ: 80MHz
+ */
+enum iwl_location_bw {
+    IWL_LOCATION_BW_20MHZ,
+    IWL_LOCATION_BW_40MHZ,
+    IWL_LOCATION_BW_80MHZ,
+};
+
+#define HLTK_11AZ_LEN 32
+#define TK_11AZ_LEN 32
+
+#define LOCATION_BW_POS 4
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ *             bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ *     center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ *     reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ *     periodicity In units of 100ms. ignored if num_of_bursts_exp = 0
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ *     the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @reserved: For alignment and future use
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ */
+struct iwl_tof_range_req_ap_entry {
+    __le32 initiator_ap_flags;
+    u8 channel_num;
+    u8 format_bw;
+    u8 ctrl_ch_position;
+    u8 ftmr_max_retries;
+    u8 bssid[ETH_ALEN];
+    __le16 burst_period;
+    u8 samples_per_burst;
+    u8 num_of_bursts;
+    __le16 reserved;
+    u8 hltk[HLTK_11AZ_LEN];
+    u8 tk[TK_11AZ_LEN];
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_4 */

 /**
  * enum iwl_tof_response_mode
  * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
@@ -535,6 +637,38 @@ struct iwl_tof_range_req_cmd_v5 {
 } __packed;
 /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */

+/**
+ * struct iwl_tof_range_req_cmd_v7 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ *     sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ *     Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ *     This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ *     TSF of this mac id. 0xff to disable TSF reporting.
+ * @common_calib: The common calib value to inject to this measurement calc
+ * @specific_calib: The specific calib value to inject to this measurement calc
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2.
+ */
+struct iwl_tof_range_req_cmd_v7 {
+    __le32 initiator_flags;
+    u8 request_id;
+    u8 num_of_ap;
+    u8 range_req_bssid[ETH_ALEN];
+    u8 macaddr_mask[ETH_ALEN];
+    u8 macaddr_template[ETH_ALEN];
+    __le32 req_timeout_ms;
+    __le32 tsf_mac_id;
+    __le16 common_calib;
+    __le16 specific_calib;
+    struct iwl_tof_range_req_ap_entry_v3 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */

 /**
  * struct iwl_tof_range_req_cmd - start measurement cmd
  * @initiator_flags: see flags @ iwl_tof_initiator_flags
@@ -565,7 +699,7 @@ struct iwl_tof_range_req_cmd {
     __le16 common_calib;
     __le16 specific_calib;
     struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
-} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */

 /*
  * enum iwl_tof_range_request_status - status of the sent request
...
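For illustration, the new format_bw field packs both enums into one byte: the frame format goes in bits 0 - 3 and the bandwidth in bits 4 - 7 (LOCATION_BW_POS). A minimal sketch using only symbols introduced above; the helper name is made up, not taken from the commit:

static void iwl_ftm_example_set_he_80mhz(struct iwl_tof_range_req_ap_entry *entry)
{
	/* frame format in the low nibble ... */
	entry->format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
	/* ... bandwidth in the high nibble, shifted by LOCATION_BW_POS (4) */
	entry->format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
}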
@@ -921,21 +921,6 @@ struct iwl_scan_probe_params_v4 {
 #define SCAN_MAX_NUM_CHANS_V3 67

-/**
- * struct iwl_scan_channel_params_v3
- * @flags: channel flags &enum iwl_scan_channel_flags
- * @count: num of channels in scan request
- * @reserved: for future use and alignment
- * @channel_config: array of explicit channel configurations
- *                  for 2.4Ghz and 5.2Ghz bands
- */
-struct iwl_scan_channel_params_v3 {
-    u8 flags;
-    u8 count;
-    __le16 reserved;
-    struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
-} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_3 */
-
 /**
  * struct iwl_scan_channel_params_v4
  * @flags: channel flags &enum iwl_scan_channel_flags
@@ -1010,20 +995,6 @@ struct iwl_scan_periodic_parms_v1 {
     __le16 reserved;
 } __packed; /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */

-/**
- * struct iwl_scan_req_params_v11
- * @general_params: &struct iwl_scan_general_params_v10
- * @channel_params: &struct iwl_scan_channel_params_v3
- * @periodic_params: &struct iwl_scan_periodic_parms_v1
- * @probe_params: &struct iwl_scan_probe_params_v3
- */
-struct iwl_scan_req_params_v11 {
-    struct iwl_scan_general_params_v10 general_params;
-    struct iwl_scan_channel_params_v3 channel_params;
-    struct iwl_scan_periodic_parms_v1 periodic_params;
-    struct iwl_scan_probe_params_v3 probe_params;
-} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_11 */
-
 /**
  * struct iwl_scan_req_params_v12
  * @general_params: &struct iwl_scan_general_params_v10
@@ -1052,18 +1023,6 @@ struct iwl_scan_req_params_v13 {
     struct iwl_scan_probe_params_v4 probe_params;
 } __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_13 */

-/**
- * struct iwl_scan_req_umac_v11
- * @uid: scan id, &enum iwl_umac_scan_uid_offsets
- * @ooc_priority: out of channel priority - &enum iwl_scan_priority
- * @scan_params: scan parameters
- */
-struct iwl_scan_req_umac_v11 {
-    __le32 uid;
-    __le32 ooc_priority;
-    struct iwl_scan_req_params_v11 scan_params;
-} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_11 */
-
 /**
  * struct iwl_scan_req_umac_v12
  * @uid: scan id, &enum iwl_umac_scan_uid_offsets
...
@@ -813,6 +813,7 @@ enum iwl_mac_beacon_flags {
     IWL_MAC_BEACON_ANT_A = BIT(9),
     IWL_MAC_BEACON_ANT_B = BIT(10),
     IWL_MAC_BEACON_ANT_C = BIT(11),
+    IWL_MAC_BEACON_FILS = BIT(12),
 };

 /**
@@ -820,6 +821,7 @@ enum iwl_mac_beacon_flags {
  * @byte_cnt: byte count of the beacon frame.
  * @flags: least significant byte for rate code. The most significant byte
  *     is &enum iwl_mac_beacon_flags.
+ * @short_ssid: Short SSID
  * @reserved: reserved
  * @template_id: currently equal to the mac context id of the coresponding mac.
  * @tim_idx: the offset of the tim IE in the beacon
@@ -831,14 +833,15 @@ enum iwl_mac_beacon_flags {
 struct iwl_mac_beacon_cmd {
     __le16 byte_cnt;
     __le16 flags;
-    __le64 reserved;
+    __le32 short_ssid;
+    __le32 reserved;
     __le32 template_id;
     __le32 tim_idx;
     __le32 tim_size;
     __le32 ecsa_offset;
     __le32 csa_offset;
     struct ieee80211_hdr frame[0];
-} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_9 */
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10 */

 struct iwl_beacon_notif {
     struct iwl_mvm_tx_resp beacon_notify_hdr;
...
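As a worked example of the new v10 layout, a FILS discovery template would set the new flag and carry the short SSID in the field that replaced half of the old 64-bit reserved word. This is a sketch, not code from the commit; the short_ssid value is assumed to be computed elsewhere (FILS defines it as a CRC-32 over the SSID):

static void iwl_example_fill_fils(struct iwl_mac_beacon_cmd *cmd, u32 short_ssid)
{
	/* flag the template as a FILS discovery frame ... */
	cmd->flags |= cpu_to_le16(IWL_MAC_BEACON_FILS);
	/* ... and carry the (pre-computed) short SSID in the new field */
	cmd->short_ssid = cpu_to_le32(short_ssid);
}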
@@ -929,7 +929,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
         cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
     memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
            sizeof(dump_info->fw_human_readable));
-    strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
+    strncpy(dump_info->dev_human_readable, fwrt->trans->name,
             sizeof(dump_info->dev_human_readable) - 1);
     strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
             sizeof(dump_info->bus_human_readable) - 1);
@@ -1230,13 +1230,15 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt,
         iter->lmac = 0;
     }

-    if (!iter->internal_txf)
+    if (!iter->internal_txf) {
         for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) {
             iter->fifo_size =
                 cfg->lmac[iter->lmac].txfifo_size[iter->fifo];
             if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
                 return true;
         }
+        iter->fifo--;
+    }

     iter->internal_txf = 1;
@@ -2351,9 +2353,6 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
     u32 occur, delay;
     unsigned long idx;

-    if (test_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
-        return -EBUSY;
-
     if (!iwl_fw_ini_trigger_on(fwrt, trig)) {
         IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
                  tp_id);
...
@@ -320,31 +320,6 @@ static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
 FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);

-static ssize_t iwl_dbgfs_fw_dbg_domain_write(struct iwl_fw_runtime *fwrt,
-                                             char *buf, size_t count)
-{
-    u32 new_domain;
-    int ret;
-
-    if (!iwl_trans_fw_running(fwrt->trans))
-        return -EIO;
-
-    ret = kstrtou32(buf, 0, &new_domain);
-    if (ret)
-        return ret;
-
-    if (new_domain != fwrt->trans->dbg.domains_bitmap) {
-        ret = iwl_dbg_tlv_gen_active_trigs(fwrt, new_domain);
-        if (ret)
-            return ret;
-
-        iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
-                               NULL);
-    }
-
-    return count;
-}
-
 static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
                                             size_t size, char *buf)
 {
@@ -352,7 +327,7 @@ static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
              fwrt->trans->dbg.domains_bitmap);
 }

-FWRT_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_domain, 20);
+FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20);

 void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
                              struct dentry *dbgfs_dir)
@@ -360,5 +335,5 @@ void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
     INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
     FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
     FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
-    FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0600);
+    FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400);
 }
@@ -251,7 +251,7 @@ struct iwl_fw_dbg {
 struct iwl_fw {
     u32 ucode_ver;

-    char fw_version[ETHTOOL_FWVERS_LEN];
+    char fw_version[64];

     /* ucode images */
     struct fw_img img[IWL_UCODE_TYPE_MAX];
...
@@ -69,7 +69,7 @@
 #include "iwl-eeprom-parse.h"
 #include "fw/acpi.h"

-#define IWL_FW_DBG_DOMAIN IWL_FW_INI_DOMAIN_ALWAYS_ON
+#define IWL_FW_DBG_DOMAIN IWL_TRANS_FW_DBG_DOMAIN(fwrt->trans)

 struct iwl_fw_runtime_ops {
     int (*dump_start)(void *ctx);
@@ -129,14 +129,6 @@ struct iwl_txf_iter_data {
     u8 internal_txf;
 };

-/**
- * enum iwl_fw_runtime_status - fw runtime status flags
- * @STATUS_GEN_ACTIVE_TRIGS: generating active trigger list
- */
-enum iwl_fw_runtime_status {
-    STATUS_GEN_ACTIVE_TRIGS,
-};
-
 /**
  * struct iwl_fw_runtime - runtime data for firmware
  * @fw: firmware image
@@ -150,7 +142,6 @@ enum iwl_fw_runtime_status {
  * @smem_cfg: saved firmware SMEM configuration
  * @cur_fw_img: current firmware image, must be maintained by
  *     the driver by calling &iwl_fw_set_current_image()
- * @status: &enum iwl_fw_runtime_status
  * @dump: debug dump data
  */
 struct iwl_fw_runtime {
@@ -171,8 +162,6 @@ struct iwl_fw_runtime {
     /* memory configuration */
     struct iwl_fwrt_shared_mem_cfg smem_cfg;

-    unsigned long status;
-
     /* debug */
     struct {
         const struct iwl_fw_dump_desc *desc;
...
@@ -284,52 +284,6 @@ struct iwl_pwr_tx_backoff {
     u32 backoff;
 };

-/**
- * struct iwl_csr_params
- *
- * @flag_sw_reset: reset the device
- * @flag_mac_clock_ready:
- *     Indicates MAC (ucode processor, etc.) is powered up and can run.
- *     Internal resources are accessible.
- *     NOTE: This does not indicate that the processor is actually running.
- *     NOTE: This does not indicate that device has completed
- *           init or post-power-down restore of internal SRAM memory.
- *           Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
- *           SRAM is restored and uCode is in normal operation mode.
- *           This note is relevant only for pre 5xxx devices.
- *     NOTE: After device reset, this bit remains "0" until host sets
- *           INIT_DONE
- * @flag_init_done: Host sets this to put device into fully operational
- *     D0 power mode. Host resets this after SW_RESET to put device into
- *     low power mode.
- * @flag_mac_access_req: Host sets this to request and maintain MAC wakeup,
- *     to allow host access to device-internal resources. Host must wait for
- *     mac_clock_ready (and !GOING_TO_SLEEP) before accessing non-CSR device
- *     registers.
- * @flag_val_mac_access_en: mac access is enabled
- * @flag_master_dis: disable master
- * @flag_stop_master: stop master
- * @addr_sw_reset: address for resetting the device
- * @mac_addr0_otp: first part of MAC address from OTP
- * @mac_addr1_otp: second part of MAC address from OTP
- * @mac_addr0_strap: first part of MAC address from strap
- * @mac_addr1_strap: second part of MAC address from strap
- */
-struct iwl_csr_params {
-    u8 flag_sw_reset;
-    u8 flag_mac_clock_ready;
-    u8 flag_init_done;
-    u8 flag_mac_access_req;
-    u8 flag_val_mac_access_en;
-    u8 flag_master_dis;
-    u8 flag_stop_master;
-    u8 addr_sw_reset;
-    u32 mac_addr0_otp;
-    u32 mac_addr1_otp;
-    u32 mac_addr0_strap;
-    u32 mac_addr1_strap;
-};
-
 /**
  * struct iwl_cfg_trans - information needed to start the trans
  *
@@ -348,7 +302,6 @@ struct iwl_csr_params {
  */
 struct iwl_cfg_trans_params {
     const struct iwl_base_params *base_params;
-    const struct iwl_csr_params *csr;
     enum iwl_device_family device_family;
     u32 umac_prph_offset;
     u32 rf_id:1,
@@ -431,6 +384,8 @@ struct iwl_fw_mon_regs {
  * @uhb_supported: ultra high band channels supported
  * @min_256_ba_txq_size: minimum number of slots required in a TX queue which
  *     supports 256 BA aggregation
+ * @num_rbds: number of receive buffer descriptors to use
+ *     (only used for multi-queue capable devices)
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -485,6 +440,7 @@ struct iwl_cfg {
     u8 max_vht_ampdu_exponent;
     u8 ucode_api_max;
     u8 ucode_api_min;
+    u16 num_rbds;
     u32 min_umac_error_event_table;
     u32 extra_phy_cfg_flags;
     u32 d3_debug_data_base_addr;
@@ -496,12 +452,22 @@ struct iwl_cfg {
     const struct iwl_fw_mon_regs mon_smem_regs;
 };

-extern const struct iwl_csr_params iwl_csr_v1;
-extern const struct iwl_csr_params iwl_csr_v2;
+#define IWL_CFG_ANY (~0)
+
+struct iwl_dev_info {
+    u16 device;
+    u16 subdevice;
+    const struct iwl_cfg *cfg;
+    const char *name;
+};

 /*
  * This list declares the config structures for all devices.
  */
+extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
+extern const char iwl9260_160_name[];
+extern const char iwl9560_160_name[];
 #if IS_ENABLED(CONFIG_IWLDVM)
 extern const struct iwl_cfg iwl5300_agn_cfg;
 extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -595,9 +561,6 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
 extern const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk;
 extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
 extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
-extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
-extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
 extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
 extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0;
 extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
@@ -636,6 +599,7 @@ extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
 extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
 extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
 extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
+extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
 #endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */

 #endif /* __IWL_CONFIG_H__ */
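The new iwl_dev_info/IWL_CFG_ANY pair is the start of the table-driven device matching mentioned in the merge description: an entry can pin a cfg and a human-readable name to a PCI device/subdevice pair, with IWL_CFG_ANY acting as a wildcard. A hypothetical lookup to show the intent; the table entry and helper below are illustrative, not taken from this commit:

/* Illustrative only: device ID and table contents are assumptions. */
static const struct iwl_dev_info iwl_dev_info_table_example[] = {
	{ .device = 0x2526, .subdevice = (u16)IWL_CFG_ANY,
	  .cfg = &iwl9260_2ac_160_cfg, .name = iwl9260_160_name },
};

static const struct iwl_dev_info *
iwl_example_find_dev_info(u16 device, u16 subdevice)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table_example); i++) {
		const struct iwl_dev_info *info = &iwl_dev_info_table_example[i];

		/* IWL_CFG_ANY matches everything for that field */
		if ((info->device == (u16)IWL_CFG_ANY ||
		     info->device == device) &&
		    (info->subdevice == (u16)IWL_CFG_ANY ||
		     info->subdevice == subdevice))
			return info;
	}

	return NULL;
}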
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -64,12 +64,12 @@
  *     the init done for driver command that configures several system modes
  * @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
  * @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
- * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
+ * @IWL_CTXT_INFO_RB_CB_SIZE: mask of the RBD Cyclic Buffer Size
  *     exponent, the actual size is 2**value, valid sizes are 8-2048.
  *     The value is four bits long. Maximum valid exponent is 12
  * @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
  *     default is short format - not supported by the driver)
- * @IWL_CTXT_INFO_RB_SIZE_POS: RB size position
+ * @IWL_CTXT_INFO_RB_SIZE: RB size mask
  *     (values are IWL_CTXT_INFO_RB_SIZE_*K)
  * @IWL_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size
  * @IWL_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size
@@ -83,12 +83,12 @@
  * @IWL_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size
  */
 enum iwl_context_info_flags {
-    IWL_CTXT_INFO_AUTO_FUNC_INIT    = BIT(0),
-    IWL_CTXT_INFO_EARLY_DEBUG       = BIT(1),
-    IWL_CTXT_INFO_ENABLE_CDMP       = BIT(2),
-    IWL_CTXT_INFO_RB_CB_SIZE_POS    = 4,
-    IWL_CTXT_INFO_TFD_FORMAT_LONG   = BIT(8),
-    IWL_CTXT_INFO_RB_SIZE_POS       = 9,
+    IWL_CTXT_INFO_AUTO_FUNC_INIT    = 0x0001,
+    IWL_CTXT_INFO_EARLY_DEBUG       = 0x0002,
+    IWL_CTXT_INFO_ENABLE_CDMP       = 0x0004,
+    IWL_CTXT_INFO_RB_CB_SIZE        = 0x00f0,
+    IWL_CTXT_INFO_TFD_FORMAT_LONG   = 0x0100,
+    IWL_CTXT_INFO_RB_SIZE           = 0x1e00,
     IWL_CTXT_INFO_RB_SIZE_1K        = 0x1,
     IWL_CTXT_INFO_RB_SIZE_2K        = 0x2,
     IWL_CTXT_INFO_RB_SIZE_4K        = 0x4,
...
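With IWL_CTXT_INFO_RB_CB_SIZE and IWL_CTXT_INFO_RB_SIZE now defined as masks (0x00f0 and 0x1e00) rather than bit positions, callers would presumably build the control-flags word with the generic bitfield helpers instead of open-coded shifts. A sketch under that assumption:

#include <linux/bitfield.h>

static u32 iwl_ctxt_info_example_flags(u8 rb_cb_size_exp)
{
	u32 flags = 0;

	/* RBD cyclic buffer size exponent into the 4-bit IWL_CTXT_INFO_RB_CB_SIZE mask */
	flags |= u32_encode_bits(rb_cb_size_exp, IWL_CTXT_INFO_RB_CB_SIZE);
	/* RB size selector (here 4K) into the IWL_CTXT_INFO_RB_SIZE mask */
	flags |= u32_encode_bits(IWL_CTXT_INFO_RB_SIZE_4K, IWL_CTXT_INFO_RB_SIZE);

	return flags;
}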
@@ -256,6 +256,7 @@
 /* RESET */
 #define CSR_RESET_REG_FLAG_NEVO_RESET                (0x00000001)
 #define CSR_RESET_REG_FLAG_FORCE_NMI                 (0x00000002)
+#define CSR_RESET_REG_FLAG_SW_RESET                  (0x00000080)
 #define CSR_RESET_REG_FLAG_MASTER_DISABLED           (0x00000100)
 #define CSR_RESET_REG_FLAG_STOP_MASTER               (0x00000200)
 #define CSR_RESET_LINK_PWR_MGMT_DISABLED             (0x80000000)
@@ -278,11 +279,35 @@
  *     4:  GOING_TO_SLEEP
  *         Indicates MAC is entering a power-saving sleep power-down.
  *         Not a good time to access device-internal resources.
+ *     3:  MAC_ACCESS_REQ
+ *         Host sets this to request and maintain MAC wakeup, to allow host
+ *         access to device-internal resources. Host must wait for
+ *         MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ *         device registers.
+ *     2:  INIT_DONE
+ *         Host sets this to put device into fully operational D0 power mode.
+ *         Host resets this after SW_RESET to put device into low power mode.
+ *     0:  MAC_CLOCK_READY
+ *         Indicates MAC (ucode processor, etc.) is powered up and can run.
+ *         Internal resources are accessible.
+ *         NOTE: This does not indicate that the processor is actually running.
+ *         NOTE: This does not indicate that device has completed
+ *               init or post-power-down restore of internal SRAM memory.
+ *               Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ *               SRAM is restored and uCode is in normal operation mode.
+ *               Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ *               do not need to save/restore it.
+ *         NOTE: After device reset, this bit remains "0" until host sets
+ *               INIT_DONE
  */
+#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY        (0x00000001)
 #define CSR_GP_CNTRL_REG_FLAG_INIT_DONE              (0x00000004)
-#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP         (0x00000010)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ         (0x00000008)
+#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP         (0x00000010)
 #define CSR_GP_CNTRL_REG_FLAG_XTAL_ON                (0x00000400)
-#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN           (0x00000001)
 #define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE         (0x07000000)
 #define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN     (0x04000000)
 #define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW          (0x08000000)
...
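The bit descriptions above translate into the usual wake handshake: set MAC_ACCESS_REQ, then poll MAC_CLOCK_READY before touching non-CSR registers. A condensed sketch using the iwl_set_bit()/iwl_poll_bit() helpers that appear later in this series (error handling and the GOING_TO_SLEEP check omitted):

static int iwl_example_grab_nic_access(struct iwl_trans *trans)
{
	/* request MAC wakeup ... */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* ... and wait for the MAC clock before accessing non-CSR registers */
	return iwl_poll_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			    25000);
}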
@@ -290,10 +290,19 @@ void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
     struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
     u32 type = le32_to_cpu(tlv->type);
     u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
+    u32 domain = le32_to_cpu(hdr->domain);
     enum iwl_ini_cfg_state *cfg_state = ext ?
         &trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
     int ret;

+    if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
+        !(domain & trans->dbg.domains_bitmap)) {
+        IWL_DEBUG_FW(trans,
+                     "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
+                     domain, trans->dbg.domains_bitmap);
+        return;
+    }
+
     if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
         IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
         goto out_err;
@@ -660,7 +669,6 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
     list_for_each_entry(node, hcmd_list, list) {
         struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
         struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
-        u32 domain = le32_to_cpu(hcmd->hdr.domain);
         u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
         struct iwl_host_cmd cmd = {
             .id = WIDE_ID(hcmd_data->group, hcmd_data->id),
@@ -668,10 +676,6 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
             .data = { hcmd_data->data, },
         };

-        if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
-            !(domain & fwrt->trans->dbg.domains_bitmap))
-            continue;
-
         iwl_trans_send_cmd(fwrt->trans, &cmd);
     }
 }
@@ -891,55 +895,17 @@ static void
 iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
                                  struct iwl_dbg_tlv_time_point_data *tp)
 {
-    struct iwl_dbg_tlv_node *node, *tmp;
+    struct iwl_dbg_tlv_node *node;
     struct list_head *trig_list = &tp->trig_list;
     struct list_head *active_trig_list = &tp->active_trig_list;

-    list_for_each_entry_safe(node, tmp, active_trig_list, list) {
-        list_del(&node->list);
-        kfree(node);
-    }
-
     list_for_each_entry(node, trig_list, list) {
         struct iwl_ucode_tlv *tlv = &node->tlv;
-        struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
-        u32 domain = le32_to_cpu(trig->hdr.domain);
-
-        if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
-            !(domain & fwrt->trans->dbg.domains_bitmap))
-            continue;

         iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
     }
 }

-int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain)
-{
-    int i;
-
-    if (test_and_set_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
-        return -EBUSY;
-
-    iwl_fw_flush_dumps(fwrt);
-
-    fwrt->trans->dbg.domains_bitmap = new_domain;
-
-    IWL_DEBUG_FW(fwrt,
-                 "WRT: Generating active triggers list, domain 0x%x\n",
-                 fwrt->trans->dbg.domains_bitmap);
-
-    for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
-        struct iwl_dbg_tlv_time_point_data *tp =
-            &fwrt->trans->dbg.time_point[i];
-
-        iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
-    }
-
-    clear_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status);
-
-    return 0;
-}
-
 static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
                                      struct iwl_fwrt_dump_data *dump_data,
                                      union iwl_dbg_tlv_tp_data *tp_data,
@@ -1013,7 +979,16 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
     enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
     int ret, i;

-    iwl_dbg_tlv_gen_active_trigs(fwrt, IWL_FW_DBG_DOMAIN);
+    IWL_DEBUG_FW(fwrt,
+                 "WRT: Generating active triggers list, domain 0x%x\n",
+                 fwrt->trans->dbg.domains_bitmap);
+
+    for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
+        struct iwl_dbg_tlv_time_point_data *tp =
+            &fwrt->trans->dbg.time_point[i];
+
+        iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
+    }

     *ini_dest = IWL_FW_INI_LOCATION_INVALID;
     for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
...
@@ -105,7 +105,6 @@ void iwl_dbg_tlv_init(struct iwl_trans *trans);
 void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
                             enum iwl_fw_ini_time_point tp_id,
                             union iwl_dbg_tlv_tp_data *tp_data);
-int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain);
 void iwl_dbg_tlv_del_timers(struct iwl_trans *trans);

 #endif /* __iwl_dbg_tlv_h__*/
@@ -493,6 +493,16 @@ static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
     }
 }

+static const char *iwl_reduced_fw_name(struct iwl_drv *drv)
+{
+    const char *name = drv->firmware_name;
+
+    if (strncmp(name, "iwlwifi-", 8) == 0)
+        name += 8;
+
+    return name;
+}
+
 static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
                                     const struct firmware *ucode_raw,
                                     struct iwl_firmware_pieces *pieces)
@@ -551,12 +561,12 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
     snprintf(drv->fw.fw_version,
              sizeof(drv->fw.fw_version),
-             "%u.%u.%u.%u%s",
+             "%u.%u.%u.%u%s %s",
              IWL_UCODE_MAJOR(drv->fw.ucode_ver),
              IWL_UCODE_MINOR(drv->fw.ucode_ver),
              IWL_UCODE_API(drv->fw.ucode_ver),
              IWL_UCODE_SERIAL(drv->fw.ucode_ver),
-             buildstr);
+             buildstr, iwl_reduced_fw_name(drv));

     /* Verify size of file vs. image size info in file's header */
@@ -636,12 +646,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
     snprintf(drv->fw.fw_version,
              sizeof(drv->fw.fw_version),
-             "%u.%u.%u.%u%s",
+             "%u.%u.%u.%u%s %s",
              IWL_UCODE_MAJOR(drv->fw.ucode_ver),
              IWL_UCODE_MINOR(drv->fw.ucode_ver),
              IWL_UCODE_API(drv->fw.ucode_ver),
              IWL_UCODE_SERIAL(drv->fw.ucode_ver),
-             buildstr);
+             buildstr, iwl_reduced_fw_name(drv));

     data = ucode->data;
@@ -895,11 +905,13 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
             if (major >= 35)
                 snprintf(drv->fw.fw_version,
                          sizeof(drv->fw.fw_version),
-                         "%u.%08x.%u", major, minor, local_comp);
+                         "%u.%08x.%u %s", major, minor,
+                         local_comp, iwl_reduced_fw_name(drv));
             else
                 snprintf(drv->fw.fw_version,
                          sizeof(drv->fw.fw_version),
-                         "%u.%u.%u", major, minor, local_comp);
+                         "%u.%u.%u %s", major, minor,
+                         local_comp, iwl_reduced_fw_name(drv));
             break;
             }
         case IWL_UCODE_TLV_FW_DBG_DEST: {
@@ -1647,6 +1659,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
     drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
 #endif

+    drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
+
     ret = iwl_request_firmware(drv, true);
     if (ret) {
         IWL_ERR(trans, "Couldn't request the fw\n");
...
...@@ -611,10 +611,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, ...@@ -611,10 +611,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
*/ */
#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) #define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
#define MQ_RX_TABLE_SIZE 512 #define RX_POOL_SIZE(rbds) ((rbds) - 1 + \
#define MQ_RX_TABLE_MASK (MQ_RX_TABLE_SIZE - 1)
#define MQ_RX_NUM_RBDS (MQ_RX_TABLE_SIZE - 1)
#define RX_POOL_SIZE (MQ_RX_NUM_RBDS + \
IWL_MAX_RX_HW_QUEUES * \ IWL_MAX_RX_HW_QUEUES * \
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC)) (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
/* cb size is the exponent */ /* cb size is the exponent */
......
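With MQ_RX_TABLE_SIZE gone, RX_POOL_SIZE() is parameterized by the per-device RBD count (trans->cfg->num_rbds elsewhere in this series), so devices with larger receive tables get a proportionally larger buffer pool. A quick standalone calculation, assuming the driver's usual allocator constants (they are not part of this hunk):

/* Toy calculation: pool = (rbds - 1), mirroring the removed
 * MQ_RX_NUM_RBDS = MQ_RX_TABLE_SIZE - 1, plus the buffers parked per RX
 * queue in the background allocator.  The three constants are assumptions.
 */
#include <stdio.h>

#define IWL_MAX_RX_HW_QUEUES    16
#define RX_CLAIM_REQ_ALLOC      8
#define RX_POST_REQ_ALLOC       4

#define RX_POOL_SIZE(rbds)      ((rbds) - 1 + \
                                 IWL_MAX_RX_HW_QUEUES * \
                                 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))

int main(void)
{
    printf("512 RBDs  -> pool of %d buffers\n", RX_POOL_SIZE(512));
    printf("2048 RBDs -> pool of %d buffers\n", RX_POOL_SIZE(2048));
    return 0;
}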
...@@ -70,36 +70,6 @@ ...@@ -70,36 +70,6 @@
#include "iwl-prph.h" #include "iwl-prph.h"
#include "iwl-fh.h" #include "iwl-fh.h"
const struct iwl_csr_params iwl_csr_v1 = {
.flag_mac_clock_ready = 0,
.flag_val_mac_access_en = 0,
.flag_init_done = 2,
.flag_mac_access_req = 3,
.flag_sw_reset = 7,
.flag_master_dis = 8,
.flag_stop_master = 9,
.addr_sw_reset = CSR_BASE + 0x020,
.mac_addr0_otp = 0x380,
.mac_addr1_otp = 0x384,
.mac_addr0_strap = 0x388,
.mac_addr1_strap = 0x38C
};
const struct iwl_csr_params iwl_csr_v2 = {
.flag_init_done = 6,
.flag_mac_clock_ready = 20,
.flag_val_mac_access_en = 20,
.flag_mac_access_req = 21,
.flag_master_dis = 28,
.flag_stop_master = 29,
.flag_sw_reset = 31,
.addr_sw_reset = CSR_BASE + 0x024,
.mac_addr0_otp = 0x30,
.mac_addr1_otp = 0x34,
.mac_addr0_strap = 0x38,
.mac_addr1_strap = 0x3C
};
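Both CSR parameter tables are removed because every remaining caller now uses fixed CSR_* bit masks directly (see the iwl_finish_nic_init() and PCIe hunks below). The deleted iwl_csr_v1 entries were just bit positions; expanding them shows the masks those positions correspond to:

/* Toy program: expand the bit positions from the deleted iwl_csr_v1 table
 * into masks, e.g. flag_init_done = 2 becomes 0x00000004.
 */
#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
    const struct { const char *name; unsigned int pos; } v1[] = {
        { "flag_mac_clock_ready", 0 },
        { "flag_init_done",       2 },
        { "flag_mac_access_req",  3 },
        { "flag_sw_reset",        7 },
        { "flag_master_dis",      8 },
        { "flag_stop_master",     9 },
    };

    for (unsigned int i = 0; i < sizeof(v1) / sizeof(v1[0]); i++)
        printf("%-22s bit %u -> mask 0x%08x\n",
               v1[i].name, v1[i].pos, BIT(v1[i].pos));
    return 0;
}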
void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val) void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{ {
trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val); trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val);
...@@ -506,8 +476,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans, ...@@ -506,8 +476,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
* Set "initialization complete" bit to move adapter from * Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state. * D0U* --> D0A* (powered-up active) state.
*/ */
iwl_set_bit(trans, CSR_GP_CNTRL, iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
BIT(cfg_trans->csr->flag_init_done));
if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000) if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2); udelay(2);
...@@ -518,8 +487,8 @@ int iwl_finish_nic_init(struct iwl_trans *trans, ...@@ -518,8 +487,8 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
* and accesses to uCode SRAM. * and accesses to uCode SRAM.
*/ */
err = iwl_poll_bit(trans, CSR_GP_CNTRL, err = iwl_poll_bit(trans, CSR_GP_CNTRL,
BIT(cfg_trans->csr->flag_mac_clock_ready), CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
BIT(cfg_trans->csr->flag_mac_clock_ready), CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000); 25000);
if (err < 0) if (err < 0)
IWL_DEBUG_INFO(trans, "Failed to wake NIC\n"); IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
......
...@@ -801,12 +801,8 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest) ...@@ -801,12 +801,8 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
struct iwl_nvm_data *data) struct iwl_nvm_data *data)
{ {
__le32 mac_addr0 = __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
cpu_to_le32(iwl_read32(trans, __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
trans->trans_cfg->csr->mac_addr0_strap));
__le32 mac_addr1 =
cpu_to_le32(iwl_read32(trans,
trans->trans_cfg->csr->mac_addr1_strap));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
/* /*
...@@ -816,10 +812,8 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, ...@@ -816,10 +812,8 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
if (is_valid_ether_addr(data->hw_addr)) if (is_valid_ether_addr(data->hw_addr))
return; return;
mac_addr0 = cpu_to_le32(iwl_read32(trans, mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
trans->trans_cfg->csr->mac_addr0_otp)); mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
mac_addr1 = cpu_to_le32(iwl_read32(trans,
trans->trans_cfg->csr->mac_addr1_otp));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
} }
......
...@@ -411,13 +411,6 @@ enum { ...@@ -411,13 +411,6 @@ enum {
HW_STEP_LOCATION_BITS = 24, HW_STEP_LOCATION_BITS = 24,
}; };
#define AUX_MISC_MASTER1_EN 0xA20818
enum aux_misc_master1_en {
AUX_MISC_MASTER1_EN_SBE_MSK = 0x1,
};
#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
#define RSA_ENABLE 0xA24B08
#define PREG_AUX_BUS_WPROT_0 0xA04CC0 #define PREG_AUX_BUS_WPROT_0 0xA04CC0
/* device family 9000 WPROT register */ /* device family 9000 WPROT register */
...@@ -430,6 +423,9 @@ enum aux_misc_master1_en { ...@@ -430,6 +423,9 @@ enum aux_misc_master1_en {
#define UMAG_SB_CPU_1_STATUS 0xA038C0 #define UMAG_SB_CPU_1_STATUS 0xA038C0
#define UMAG_SB_CPU_2_STATUS 0xA038C4 #define UMAG_SB_CPU_2_STATUS 0xA038C4
#define UMAG_GEN_HW_STATUS 0xA038C8 #define UMAG_GEN_HW_STATUS 0xA038C8
#define UREG_UMAC_CURRENT_PC 0xa05c18
#define UREG_LMAC1_CURRENT_PC 0xa05c1c
#define UREG_LMAC2_CURRENT_PC 0xa05c20
/* For UMAG_GEN_HW_STATUS reg check */ /* For UMAG_GEN_HW_STATUS reg check */
enum { enum {
......
...@@ -112,6 +112,8 @@ ...@@ -112,6 +112,8 @@
* 6) Eventually, the free function will be called. * 6) Eventually, the free function will be called.
*/ */
#define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID 0x55550000 #define FH_RSCSR_FRAME_INVALID 0x55550000
#define FH_RSCSR_FRAME_ALIGN 0x40 #define FH_RSCSR_FRAME_ALIGN 0x40
...@@ -358,6 +360,24 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size) ...@@ -358,6 +360,24 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
} }
} }
static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
switch (rb_size) {
case IWL_AMSDU_2K:
return 2 * 1024;
case IWL_AMSDU_4K:
return 4 * 1024;
case IWL_AMSDU_8K:
return 8 * 1024;
case IWL_AMSDU_12K:
return 12 * 1024;
default:
WARN_ON(1);
return 0;
}
}
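The new iwl_trans_get_rb_size() returns the receive-buffer size in bytes, complementing the existing iwl_trans_get_rb_size_order() which returns a page-allocation order. The reason for having the byte value around shows up in the PCIe RX changes further down: with 2K buffers, one page allocation can hold more than one RB. A standalone sketch of that arithmetic, assuming 4 KiB pages:

/* Toy arithmetic: bytes per RB, the smallest power-of-two allocation that
 * holds one RB, and how many RBs then fit into that allocation.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u         /* assumption for this sketch */

int main(void)
{
    const unsigned int rb_kib[] = { 2, 4, 8, 12 };

    for (unsigned int i = 0; i < 4; i++) {
        unsigned int bytes = rb_kib[i] * 1024;
        unsigned int alloc = PAGE_SIZE;

        while (alloc < bytes)   /* i.e. PAGE_SIZE << rx_page_order */
            alloc *= 2;
        printf("%2uK RB: %5u bytes, %5u-byte allocation, %u RB(s) per allocation\n",
               rb_kib[i], bytes, alloc, alloc / bytes);
    }
    return 0;
}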
struct iwl_hcmd_names { struct iwl_hcmd_names {
u8 cmd_id; u8 cmd_id;
const char *const cmd_name; const char *const cmd_name;
...@@ -839,6 +859,8 @@ struct iwl_trans { ...@@ -839,6 +859,8 @@ struct iwl_trans {
enum iwl_plat_pm_mode system_pm_mode; enum iwl_plat_pm_mode system_pm_mode;
const char *name;
/* pointer to trans specific struct */ /* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */ /*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *)); char trans_specific[0] __aligned(sizeof(void *));
......
...@@ -989,6 +989,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, ...@@ -989,6 +989,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
vif = iwl_mvm_get_bss_vif(mvm); vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif)) { if (IS_ERR_OR_NULL(vif)) {
ret = 1; ret = 1;
...@@ -1083,6 +1085,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, ...@@ -1083,6 +1085,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
ieee80211_restart_hw(mvm->hw); ieee80211_restart_hw(mvm->hw);
} }
} }
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
} }
out_noreset: out_noreset:
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
...@@ -1929,6 +1933,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) ...@@ -1929,6 +1933,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
/* get the BSS vif pointer again */ /* get the BSS vif pointer again */
vif = iwl_mvm_get_bss_vif(mvm); vif = iwl_mvm_get_bss_vif(mvm);
if (IS_ERR_OR_NULL(vif)) if (IS_ERR_OR_NULL(vif))
......
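__iwl_mvm_suspend() now raises IWL_MVM_STATUS_IN_D3 before building the D3 configuration and clears it on the restart path, while __iwl_mvm_resume() clears it once the mutex is held; the power-management hunks further down key off this bit instead of checking which firmware image is loaded. A minimal standalone sketch of the pattern (the bit helpers are stand-ins for the kernel's set_bit()/clear_bit()/test_bit(); the bit number is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define IWL_MVM_STATUS_IN_D3 7  /* position chosen for the sketch only */

static unsigned long status;

static void set_bit(int nr, unsigned long *addr)        { *addr |=  1UL << nr; }
static void clear_bit(int nr, unsigned long *addr)      { *addr &= ~(1UL << nr); }
static bool test_bit(int nr, const unsigned long *addr) { return *addr & (1UL << nr); }

static void suspend(void) { set_bit(IWL_MVM_STATUS_IN_D3, &status); }
static void resume(void)  { clear_bit(IWL_MVM_STATUS_IN_D3, &status); }

/* e.g. pick WoWLAN-style power timeouts only while suspended */
static const char *power_mode(void)
{
    return test_bit(IWL_MVM_STATUS_IN_D3, &status) ? "wowlan" : "normal";
}

int main(void)
{
    printf("before suspend: %s\n", power_mode());
    suspend();
    printf("while in D3:    %s\n", power_mode());
    resume();
    printf("after resume:   %s\n", power_mode());
    return 0;
}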
...@@ -752,7 +752,7 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf, ...@@ -752,7 +752,7 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
pos += scnprintf(pos, endpos - pos, "FW: %s\n", pos += scnprintf(pos, endpos - pos, "FW: %s\n",
mvm->fwrt.fw->human_readable); mvm->fwrt.fw->human_readable);
pos += scnprintf(pos, endpos - pos, "Device: %s\n", pos += scnprintf(pos, endpos - pos, "Device: %s\n",
mvm->fwrt.trans->cfg->name); mvm->fwrt.trans->name);
pos += scnprintf(pos, endpos - pos, "Bus: %s\n", pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
mvm->fwrt.dev->bus->name); mvm->fwrt.dev->bus->name);
......
...@@ -208,10 +208,11 @@ static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -208,10 +208,11 @@ static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->tsf_mac_id = cpu_to_le32(0xff); cmd->tsf_mac_id = cpu_to_le32(0xff);
} }
static int iwl_mvm_ftm_target_chandef(struct iwl_mvm *mvm, static int
struct cfg80211_pmsr_request_peer *peer, iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
u8 *channel, u8 *bandwidth, struct cfg80211_pmsr_request_peer *peer,
u8 *ctrl_ch_position) u8 *channel, u8 *bandwidth,
u8 *ctrl_ch_position)
{ {
u32 freq = peer->chandef.chan->center_freq; u32 freq = peer->chandef.chan->center_freq;
...@@ -242,6 +243,45 @@ static int iwl_mvm_ftm_target_chandef(struct iwl_mvm *mvm, ...@@ -242,6 +243,45 @@ static int iwl_mvm_ftm_target_chandef(struct iwl_mvm *mvm,
return 0; return 0;
} }
static int
iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer,
u8 *channel, u8 *format_bw,
u8 *ctrl_ch_position)
{
u32 freq = peer->chandef.chan->center_freq;
*channel = ieee80211_frequency_to_channel(freq);
switch (peer->chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
*format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
break;
case NL80211_CHAN_WIDTH_20:
*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
break;
case NL80211_CHAN_WIDTH_40:
*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
*format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
break;
case NL80211_CHAN_WIDTH_80:
*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
break;
default:
IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
peer->chandef.width);
return -EINVAL;
}
*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
return 0;
}
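Unlike the v1 helper, the v2 target helper packs two things into the single format_bw byte: the frame format in the low bits and the bandwidth code shifted by LOCATION_BW_POS. A toy encoder, with enum values and shift assumed for illustration (only the packing scheme comes from the code above):

#include <stdio.h>

enum { FRAME_FORMAT_LEGACY, FRAME_FORMAT_HT, FRAME_FORMAT_VHT };  /* assumed */
enum { BW_20MHZ, BW_40MHZ, BW_80MHZ };                            /* assumed */
#define LOCATION_BW_POS 4       /* assumed */

static unsigned char pack_format_bw(unsigned int format, unsigned int bw)
{
    return (unsigned char)(format | (bw << LOCATION_BW_POS));
}

int main(void)
{
    /* 80 MHz VHT, as in the NL80211_CHAN_WIDTH_80 case above */
    printf("VHT/80MHz -> 0x%02x\n", pack_format_bw(FRAME_FORMAT_VHT, BW_80MHZ));
    /* 20 MHz legacy, the NL80211_CHAN_WIDTH_20_NOHT case */
    printf("LEG/20MHz -> 0x%02x\n", pack_format_bw(FRAME_FORMAT_LEGACY, BW_20MHZ));
    return 0;
}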
static int static int
iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm, iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer, struct cfg80211_pmsr_request_peer *peer,
...@@ -249,9 +289,9 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm, ...@@ -249,9 +289,9 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
{ {
int ret; int ret;
ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num, ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
&target->bandwidth, &target->bandwidth,
&target->ctrl_ch_position); &target->ctrl_ch_position);
if (ret) if (ret)
return ret; return ret;
...@@ -278,18 +318,11 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm, ...@@ -278,18 +318,11 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
#define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \ #define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \
cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag)) cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, static void
struct cfg80211_pmsr_request_peer *peer, iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
struct iwl_tof_range_req_ap_entry *target) struct cfg80211_pmsr_request_peer *peer,
struct iwl_tof_range_req_ap_entry *target)
{ {
int ret;
ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num,
&target->bandwidth,
&target->ctrl_ch_position);
if (ret)
return ret;
memcpy(target->bssid, peer->addr, ETH_ALEN); memcpy(target->bssid, peer->addr, ETH_ALEN);
target->burst_period = target->burst_period =
cpu_to_le16(peer->ftm.burst_period); cpu_to_le16(peer->ftm.burst_period);
...@@ -314,60 +347,166 @@ static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, ...@@ -314,60 +347,166 @@ static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm,
FTM_PUT_FLAG(ALGO_LR); FTM_PUT_FLAG(ALGO_LR);
else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT) else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
FTM_PUT_FLAG(ALGO_FFT); FTM_PUT_FLAG(ALGO_FFT);
}
static int
iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer,
struct iwl_tof_range_req_ap_entry_v3 *target)
{
int ret;
ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
&target->bandwidth,
&target->ctrl_ch_position);
if (ret)
return ret;
/*
* Versions 3 and 4 have some common fields, so
* iwl_mvm_ftm_put_target_common() can be used for both versions.
*/
iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
return 0; return 0;
} }
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static int iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request *req) struct cfg80211_pmsr_request_peer *peer,
struct iwl_tof_range_req_ap_entry *target)
{
int ret;
ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
&target->format_bw,
&target->ctrl_ch_position);
if (ret)
return ret;
iwl_mvm_ftm_put_target_common(mvm, peer, target);
return 0;
}
static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
{
u32 status;
int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);
if (!err && status) {
IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
status);
err = iwl_ftm_range_request_status_to_err(status);
}
return err;
}
static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{ {
struct iwl_tof_range_req_cmd_v5 cmd_v5; struct iwl_tof_range_req_cmd_v5 cmd_v5;
struct iwl_tof_range_req_cmd cmd;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
u8 num_of_ap;
struct iwl_host_cmd hcmd = { struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.dataflags[0] = IWL_HCMD_DFL_DUP, .dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd_v5,
.len[0] = sizeof(cmd_v5),
}; };
u32 status = 0; u8 i;
int err, i; int err;
lockdep_assert_held(&mvm->mutex); iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
if (mvm->ftm_initiator.req) for (i = 0; i < cmd_v5.num_of_ap; i++) {
return -EBUSY; struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
if (new_api) { err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
iwl_mvm_ftm_cmd(mvm, vif, &cmd, req); if (err)
hcmd.data[0] = &cmd; return err;
hcmd.len[0] = sizeof(cmd);
num_of_ap = cmd.num_of_ap;
} else {
iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
hcmd.data[0] = &cmd_v5;
hcmd.len[0] = sizeof(cmd_v5);
num_of_ap = cmd_v5.num_of_ap;
} }
for (i = 0; i < num_of_ap; i++) { return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}
static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
struct iwl_tof_range_req_cmd_v7 cmd_v7;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd_v7,
.len[0] = sizeof(cmd_v7),
};
u8 i;
int err;
/*
* Versions 7 and 8 have the same structure except for the responders
* list, so iwl_mvm_ftm_cmd() can be used for version 7 too.
*/
iwl_mvm_ftm_cmd(mvm, vif, (void *)&cmd_v7, req);
for (i = 0; i < cmd_v7.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
if (new_api) err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
err = iwl_mvm_ftm_put_target(mvm, peer, &cmd.ap[i]); if (err)
else return err;
err = iwl_mvm_ftm_put_target_v2(mvm, peer, }
&cmd_v5.ap[i]);
return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}
static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
struct iwl_tof_range_req_cmd cmd;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.dataflags[0] = IWL_HCMD_DFL_DUP,
.data[0] = &cmd,
.len[0] = sizeof(cmd),
};
u8 i;
int err;
iwl_mvm_ftm_cmd(mvm, vif, &cmd, req);
for (i = 0; i < cmd.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
if (err) if (err)
return err; return err;
} }
err = iwl_mvm_send_cmd_status(mvm, &hcmd, &status); return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
if (!err && status) { }
IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
status); int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
err = iwl_ftm_range_request_status_to_err(status); struct cfg80211_pmsr_request *req)
{
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
int err;
lockdep_assert_held(&mvm->mutex);
if (mvm->ftm_initiator.req)
return -EBUSY;
if (new_api) {
u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
TOF_RANGE_REQ_CMD);
if (cmd_ver == 8)
err = iwl_mvm_ftm_start_v8(mvm, vif, req);
else
err = iwl_mvm_ftm_start_v7(mvm, vif, req);
} else {
err = iwl_mvm_ftm_start_v5(mvm, vif, req);
} }
if (!err) { if (!err) {
......
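iwl_mvm_ftm_start() is now only a dispatcher: firmware without IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ gets the v5 command, and for new firmware the advertised TOF_RANGE_REQ_CMD version selects between the v7 and v8 builders. A compact standalone sketch of that dispatch (the builders are stubs):

#include <stdbool.h>
#include <stdio.h>

static int start_v5(void) { puts("building iwl_tof_range_req_cmd_v5"); return 0; }
static int start_v7(void) { puts("building iwl_tof_range_req_cmd_v7"); return 0; }
static int start_v8(void) { puts("building iwl_tof_range_req_cmd (v8)"); return 0; }

static int ftm_start(bool new_range_req_api, int cmd_ver)
{
    if (!new_range_req_api)
        return start_v5();      /* firmware predates the new range-request API */
    if (cmd_ver == 8)
        return start_v8();
    return start_v7();          /* default for the new API */
}

int main(void)
{
    ftm_start(false, 0);
    ftm_start(true, 7);
    ftm_start(true, 8);
    return 0;
}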
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY * GPL LICENSE SUMMARY
* *
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation * Copyright (C) 2018 - 2019 Intel Corporation
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
* BSD LICENSE * BSD LICENSE
* *
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -62,12 +62,72 @@ ...@@ -62,12 +62,72 @@
#include "mvm.h" #include "mvm.h"
#include "constants.h" #include "constants.h"
static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef,
u8 *bw, u8 *ctrl_ch_position)
{
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
*bw = IWL_TOF_BW_20_LEGACY;
break;
case NL80211_CHAN_WIDTH_20:
*bw = IWL_TOF_BW_20_HT;
break;
case NL80211_CHAN_WIDTH_40:
*bw = IWL_TOF_BW_40;
*ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
break;
case NL80211_CHAN_WIDTH_80:
*bw = IWL_TOF_BW_80;
*ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
break;
default:
return -ENOTSUPP;
}
return 0;
}
static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
u8 *format_bw,
u8 *ctrl_ch_position)
{
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
*format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
break;
case NL80211_CHAN_WIDTH_20:
*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
break;
case NL80211_CHAN_WIDTH_40:
*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
*format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
*ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
break;
case NL80211_CHAN_WIDTH_80:
*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
*ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
break;
default:
return -ENOTSUPP;
}
return 0;
}
static int static int
iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef) struct cfg80211_chan_def *chandef)
{ {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
/*
* The command structure is the same for versions 6 and 7 (only the
* field interpretation is different), so the same struct can be used
* for both cases.
*/
struct iwl_tof_responder_config_cmd cmd = { struct iwl_tof_responder_config_cmd cmd = {
.channel_num = chandef->chan->hw_value, .channel_num = chandef->chan->hw_value,
.cmd_valid_fields = .cmd_valid_fields =
...@@ -76,27 +136,22 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, ...@@ -76,27 +136,22 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
IWL_TOF_RESPONDER_CMD_VALID_STA_ID), IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
.sta_id = mvmvif->bcast_sta.sta_id, .sta_id = mvmvif->bcast_sta.sta_id,
}; };
u8 cmd_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
TOF_RESPONDER_CONFIG_CMD);
int err;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
switch (chandef->width) { if (cmd_ver == 7)
case NL80211_CHAN_WIDTH_20_NOHT: err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw,
cmd.bandwidth = IWL_TOF_BW_20_LEGACY; &cmd.ctrl_ch_position);
break; else
case NL80211_CHAN_WIDTH_20: err = iwl_mvm_ftm_responder_set_bw_v1(chandef, &cmd.format_bw,
cmd.bandwidth = IWL_TOF_BW_20_HT; &cmd.ctrl_ch_position);
break;
case NL80211_CHAN_WIDTH_40: if (err) {
cmd.bandwidth = IWL_TOF_BW_40; IWL_ERR(mvm, "Failed to set responder bandwidth\n");
cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); return err;
break;
case NL80211_CHAN_WIDTH_80:
cmd.bandwidth = IWL_TOF_BW_80;
cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
break;
default:
WARN_ON(1);
return -EINVAL;
} }
memcpy(cmd.bssid, vif->addr, ETH_ALEN); memcpy(cmd.bssid, vif->addr, ETH_ALEN);
......
...@@ -353,22 +353,35 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, ...@@ -353,22 +353,35 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) { if (ret) {
struct iwl_trans *trans = mvm->trans; struct iwl_trans *trans = mvm->trans;
if (ret == -ETIMEDOUT) if (trans->trans_cfg->device_family >=
iwl_fw_dbg_error_collect(&mvm->fwrt, IWL_DEVICE_FAMILY_22000) {
FW_DBG_TRIGGER_ALIVE_TIMEOUT);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
IWL_ERR(mvm, IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS), iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
iwl_read_umac_prph(trans, iwl_read_umac_prph(trans,
UMAG_SB_CPU_2_STATUS)); UMAG_SB_CPU_2_STATUS));
else if (trans->trans_cfg->device_family >= IWL_ERR(mvm, "UMAC PC: 0x%x\n",
IWL_DEVICE_FAMILY_8000) iwl_read_umac_prph(trans,
UREG_UMAC_CURRENT_PC));
IWL_ERR(mvm, "LMAC PC: 0x%x\n",
iwl_read_umac_prph(trans,
UREG_LMAC1_CURRENT_PC));
if (iwl_mvm_is_cdb_supported(mvm))
IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
iwl_read_umac_prph(trans,
UREG_LMAC2_CURRENT_PC));
} else if (trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_8000) {
IWL_ERR(mvm, IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, SB_CPU_1_STATUS), iwl_read_prph(trans, SB_CPU_1_STATUS),
iwl_read_prph(trans, SB_CPU_2_STATUS)); iwl_read_prph(trans, SB_CPU_2_STATUS));
}
if (ret == -ETIMEDOUT)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_ALIVE_TIMEOUT);
iwl_fw_set_current_image(&mvm->fwrt, old_type); iwl_fw_set_current_image(&mvm->fwrt, old_type);
return ret; return ret;
} }
......
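When the alive notification fails on a 22000-family device, the driver now also dumps the UMAC and LMAC program counters through the UREG_*_CURRENT_PC peripheral registers added to iwl-prph.h above, and collects the ALIVE_TIMEOUT debug data only after printing. A standalone sketch of the dump, with a stubbed register read (only the offsets come from this diff):

#include <stdio.h>

#define UREG_UMAC_CURRENT_PC    0xa05c18
#define UREG_LMAC1_CURRENT_PC   0xa05c1c
#define UREG_LMAC2_CURRENT_PC   0xa05c20

static unsigned int read_umac_prph(unsigned int ofs)
{
    /* stand-in for iwl_read_umac_prph(); real code reads the device */
    return 0x00800000u + (ofs & 0xff);
}

int main(void)
{
    int cdb_supported = 0;      /* second LMAC present? (hypothetical) */

    printf("UMAC PC: 0x%x\n", read_umac_prph(UREG_UMAC_CURRENT_PC));
    printf("LMAC PC: 0x%x\n", read_umac_prph(UREG_LMAC1_CURRENT_PC));
    if (cdb_supported)
        printf("LMAC2 PC: 0x%x\n", read_umac_prph(UREG_LMAC2_CURRENT_PC));
    return 0;
}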
...@@ -1160,6 +1160,7 @@ struct iwl_mvm { ...@@ -1160,6 +1160,7 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
* @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
*/ */
enum iwl_mvm_status { enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL, IWL_MVM_STATUS_HW_RFKILL,
...@@ -1170,6 +1171,7 @@ enum iwl_mvm_status { ...@@ -1170,6 +1171,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_FIRMWARE_RUNNING, IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_NEED_FLUSH_P2P, IWL_MVM_STATUS_NEED_FLUSH_P2P,
IWL_MVM_STATUS_IN_D3,
}; };
/* Keep track of completed init configuration */ /* Keep track of completed init configuration */
......
...@@ -178,7 +178,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, ...@@ -178,7 +178,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
} else { } else {
IWL_DEBUG_EEPROM(mvm->trans->dev, IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM access command failed with status %d (device: %s)\n", "NVM access command failed with status %d (device: %s)\n",
ret, mvm->cfg->name); ret, mvm->trans->name);
ret = -ENODATA; ret = -ENODATA;
} }
goto exit; goto exit;
......
...@@ -830,7 +830,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, ...@@ -830,7 +830,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
} }
IWL_INFO(mvm, "Detected %s, REV=0x%X\n", IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
mvm->cfg->name, mvm->trans->hw_rev); mvm->trans->name, mvm->trans->hw_rev);
if (iwlwifi_mod_params.nvm_file) if (iwlwifi_mod_params.nvm_file)
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
......
...@@ -198,7 +198,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, ...@@ -198,7 +198,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
if (!mvmvif->queue_params[ac].uapsd) if (!mvmvif->queue_params[ac].uapsd)
continue; continue;
if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN) if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
cmd->flags |= cmd->flags |=
cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
...@@ -233,15 +233,15 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, ...@@ -233,15 +233,15 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL); cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
cmd->snooze_window = cmd->snooze_window =
(mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ? test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ?
cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
} }
cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len; cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN || cmd->flags & if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ||
cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
cmd->rx_data_timeout_uapsd = cmd->rx_data_timeout_uapsd =
cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
cmd->tx_data_timeout_uapsd = cmd->tx_data_timeout_uapsd =
...@@ -354,8 +354,7 @@ static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) ...@@ -354,8 +354,7 @@ static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm, static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd, struct iwl_mac_power_cmd *cmd)
bool host_awake)
{ {
int dtimper = vif->bss_conf.dtim_period ?: 1; int dtimper = vif->bss_conf.dtim_period ?: 1;
int skip; int skip;
...@@ -370,7 +369,7 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm, ...@@ -370,7 +369,7 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
if (dtimper >= 10) if (dtimper >= 10)
return; return;
if (host_awake) { if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP) if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
return; return;
skip = 2; skip = 2;
...@@ -390,8 +389,7 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm, ...@@ -390,8 +389,7 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd, struct iwl_mac_power_cmd *cmd)
bool host_awake)
{ {
int dtimper, bi; int dtimper, bi;
int keep_alive; int keep_alive;
...@@ -437,9 +435,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, ...@@ -437,9 +435,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD; cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
} }
iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake); iwl_mvm_power_config_skip_dtim(mvm, vif, cmd);
if (!host_awake) { if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
cmd->rx_data_timeout = cmd->rx_data_timeout =
cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
cmd->tx_data_timeout = cmd->tx_data_timeout =
...@@ -512,8 +510,7 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm, ...@@ -512,8 +510,7 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
{ {
struct iwl_mac_power_cmd cmd = {}; struct iwl_mac_power_cmd cmd = {};
iwl_mvm_power_build_cmd(mvm, vif, &cmd, iwl_mvm_power_build_cmd(mvm, vif, &cmd);
mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN);
iwl_mvm_power_log(mvm, &cmd); iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd)); memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
...@@ -536,7 +533,7 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm) ...@@ -536,7 +533,7 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ? if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ?
mvm->disable_power_off_d3 : mvm->disable_power_off) mvm->disable_power_off_d3 : mvm->disable_power_off)
cmd.flags &= cmd.flags &=
cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
...@@ -943,7 +940,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, ...@@ -943,7 +940,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
if (!mvmvif->bf_data.bf_enabled) if (!mvmvif->bf_data.bf_enabled)
return 0; return 0;
if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
......
...@@ -1906,20 +1906,6 @@ iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params, ...@@ -1906,20 +1906,6 @@ iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params,
iwl_scan_build_ssids(params, pp->direct_scan, bitmap_ssid); iwl_scan_build_ssids(params, pp->direct_scan, bitmap_ssid);
} }
static void
iwl_mvm_scan_umac_fill_ch_p_v3(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif,
struct iwl_scan_channel_params_v3 *cp)
{
cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
cp->count = params->n_channels;
iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
params->n_channels, 0,
cp->channel_config);
}
static void static void
iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm, iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params, struct iwl_mvm_scan_params *params,
...@@ -1937,37 +1923,6 @@ iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm, ...@@ -1937,37 +1923,6 @@ iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
vif->type); vif->type);
} }
static int iwl_mvm_scan_umac_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
int uid)
{
struct iwl_scan_req_umac_v11 *cmd = mvm->scan_cmd;
struct iwl_scan_req_params_v11 *scan_p = &cmd->scan_params;
int ret;
u16 gen_flags;
mvm->scan_uid_status[uid] = type;
cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
&scan_p->general_params,
gen_flags);
ret = iwl_mvm_fill_scan_sched_params(params,
scan_p->periodic_params.schedule,
&scan_p->periodic_params.delay);
if (ret)
return ret;
iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
iwl_mvm_scan_umac_fill_ch_p_v3(mvm, params, vif,
&scan_p->channel_params);
return 0;
}
static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type, struct iwl_mvm_scan_params *params, int type,
...@@ -2152,7 +2107,6 @@ static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = { ...@@ -2152,7 +2107,6 @@ static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
/* set the newest version first to shorten the list traverse time */ /* set the newest version first to shorten the list traverse time */
IWL_SCAN_UMAC_HANDLER(13), IWL_SCAN_UMAC_HANDLER(13),
IWL_SCAN_UMAC_HANDLER(12), IWL_SCAN_UMAC_HANDLER(12),
IWL_SCAN_UMAC_HANDLER(11),
}; };
static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm, static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
...@@ -2511,7 +2465,6 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver) ...@@ -2511,7 +2465,6 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
switch (scan_ver) { switch (scan_ver) {
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13); IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12); IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(11);
} }
return 0; return 0;
......
...@@ -206,7 +206,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, ...@@ -206,7 +206,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->mtr_size = ctxt_info_gen3->mtr_size =
cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size)); cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
ctxt_info_gen3->mcr_size = ctxt_info_gen3->mcr_size =
cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE)); cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));
trans_pcie->ctxt_info_gen3 = ctxt_info_gen3; trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
trans_pcie->prph_info = prph_info; trans_pcie->prph_info = prph_info;
......
...@@ -193,11 +193,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, ...@@ -193,11 +193,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
rb_size = IWL_CTXT_INFO_RB_SIZE_4K; rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
} }
BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF); WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG | control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) << control_flags |=
IWL_CTXT_INFO_RB_CB_SIZE_POS) | u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
(rb_size << IWL_CTXT_INFO_RB_SIZE_POS); IWL_CTXT_INFO_RB_CB_SIZE);
control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
ctxt_info->control.control_flags = cpu_to_le32(control_flags); ctxt_info->control.control_flags = cpu_to_le32(control_flags);
/* initialize RX default queue */ /* initialize RX default queue */
......
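The context-info setup now derives the circular-buffer size from the per-device RBD count and uses u32_encode_bits() instead of open-coded shifts. RX_QUEUE_CB_SIZE() appears to be a log2 (the iwl-fh.h comment above says "cb size is the exponent"), so the "> 12" check presumably caps the table at 4096 descriptors. A standalone sketch of the field encoding, with made-up mask values (the real u32_encode_bits() lives in <linux/bitfield.h>):

/* Toy version of u32_encode_bits(): place a value at a field's low bit,
 * given the field mask.  Mask values below are invented for the sketch.
 */
#include <stdio.h>

static unsigned int u32_encode_bits(unsigned int val, unsigned int field_mask)
{
    return (val << __builtin_ctz(field_mask)) & field_mask;
}

int main(void)
{
    const unsigned int RB_CB_SIZE_MASK = 0x00000ff0;    /* assumed */
    const unsigned int RB_SIZE_MASK    = 0x0000f000;    /* assumed */
    unsigned int control_flags = 0;

    /* e.g. 512 RBDs -> exponent 9; the RB-size code 4 is hypothetical */
    control_flags |= u32_encode_bits(9, RB_CB_SIZE_MASK);
    control_flags |= u32_encode_bits(4, RB_SIZE_MASK);
    printf("control_flags = 0x%08x\n", control_flags);  /* 0x00004090 */
    return 0;
}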
...@@ -106,6 +106,8 @@ struct iwl_host_cmd; ...@@ -106,6 +106,8 @@ struct iwl_host_cmd;
* @page: driver's pointer to the rxb page * @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW * @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table * @vid: index of this rxb in the global table
* @offset: indicates which offset of the page (in bytes)
* this buffer uses (if multiple RBs fit into one page)
*/ */
struct iwl_rx_mem_buffer { struct iwl_rx_mem_buffer {
dma_addr_t page_dma; dma_addr_t page_dma;
...@@ -113,6 +115,7 @@ struct iwl_rx_mem_buffer { ...@@ -113,6 +115,7 @@ struct iwl_rx_mem_buffer {
u16 vid; u16 vid;
bool invalid; bool invalid;
struct list_head list; struct list_head list;
u32 offset;
}; };
/** /**
...@@ -491,6 +494,7 @@ struct cont_rec { ...@@ -491,6 +494,7 @@ struct cont_rec {
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame. * frame.
* @rx_page_order: page order for receive buffer size * @rx_page_order: page order for receive buffer size
* @rx_buf_bytes: RX buffer (RB) size in bytes
* @reg_lock: protect hw register access * @reg_lock: protect hw register access
* @mutex: to protect stop_device / start_fw / start_hw * @mutex: to protect stop_device / start_fw / start_hw
* @cmd_in_flight: true when we have a host command in flight * @cmd_in_flight: true when we have a host command in flight
...@@ -510,11 +514,16 @@ struct cont_rec { ...@@ -510,11 +514,16 @@ struct cont_rec {
* @in_rescan: true if we have triggered a device rescan * @in_rescan: true if we have triggered a device rescan
* @base_rb_stts: base virtual address of receive buffer status for all queues * @base_rb_stts: base virtual address of receive buffer status for all queues
* @base_rb_stts_dma: base physical address of receive buffer status * @base_rb_stts_dma: base physical address of receive buffer status
* @supported_dma_mask: DMA mask to validate the actual address against,
* will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
* @alloc_page_lock: spinlock for the page allocator
* @alloc_page: allocated page to still use parts of
* @alloc_page_used: how much of the allocated page was already used (bytes)
*/ */
struct iwl_trans_pcie { struct iwl_trans_pcie {
struct iwl_rxq *rxq; struct iwl_rxq *rxq;
struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE]; struct iwl_rx_mem_buffer *rx_pool;
struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE]; struct iwl_rx_mem_buffer **global_table;
struct iwl_rb_allocator rba; struct iwl_rb_allocator rba;
union { union {
struct iwl_context_info *ctxt_info; struct iwl_context_info *ctxt_info;
...@@ -573,6 +582,7 @@ struct iwl_trans_pcie { ...@@ -573,6 +582,7 @@ struct iwl_trans_pcie {
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 max_tbs; u8 max_tbs;
u16 tfd_size; u16 tfd_size;
u16 num_rx_bufs;
enum iwl_amsdu_size rx_buf_size; enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword; bool bc_table_dword;
...@@ -580,6 +590,13 @@ struct iwl_trans_pcie { ...@@ -580,6 +590,13 @@ struct iwl_trans_pcie {
bool sw_csum_tx; bool sw_csum_tx;
bool pcie_dbg_dumped_once; bool pcie_dbg_dumped_once;
u32 rx_page_order; u32 rx_page_order;
u32 rx_buf_bytes;
u32 supported_dma_mask;
/* allocator lock for the two values below */
spinlock_t alloc_page_lock;
struct page *alloc_page;
u32 alloc_page_used;
/*protect hw register */ /*protect hw register */
spinlock_t reg_lock; spinlock_t reg_lock;
......
...@@ -240,7 +240,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, ...@@ -240,7 +240,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
reg); reg);
iwl_set_bit(trans, CSR_GP_CNTRL, iwl_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req)); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
rxq->need_update = true; rxq->need_update = true;
return; return;
} }
...@@ -298,6 +298,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans, ...@@ -298,6 +298,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
struct iwl_rxq *rxq) struct iwl_rxq *rxq)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb; struct iwl_rx_mem_buffer *rxb;
/* /*
...@@ -318,8 +319,8 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, ...@@ -318,8 +319,8 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
list); list);
list_del(&rxb->list); list_del(&rxb->list);
rxb->invalid = false; rxb->invalid = false;
/* 12 first bits are expected to be empty */ /* some low bits are expected to be unset (depending on hw) */
WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
/* Point to Rx buffer via next RBD in circular buffer */ /* Point to Rx buffer via next RBD in circular buffer */
iwl_pcie_restock_bd(trans, rxq, rxb); iwl_pcie_restock_bd(trans, rxq, rxb);
rxq->write = (rxq->write + 1) & (rxq->queue_size - 1); rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
...@@ -412,15 +413,34 @@ void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) ...@@ -412,15 +413,34 @@ void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
* *
*/ */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
gfp_t priority) u32 *offset, gfp_t priority)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
struct page *page; struct page *page;
gfp_t gfp_mask = priority; gfp_t gfp_mask = priority;
if (trans_pcie->rx_page_order > 0) if (trans_pcie->rx_page_order > 0)
gfp_mask |= __GFP_COMP; gfp_mask |= __GFP_COMP;
if (trans_pcie->alloc_page) {
spin_lock_bh(&trans_pcie->alloc_page_lock);
/* recheck */
if (trans_pcie->alloc_page) {
*offset = trans_pcie->alloc_page_used;
page = trans_pcie->alloc_page;
trans_pcie->alloc_page_used += rbsize;
if (trans_pcie->alloc_page_used >= allocsize)
trans_pcie->alloc_page = NULL;
else
get_page(page);
spin_unlock_bh(&trans_pcie->alloc_page_lock);
return page;
}
spin_unlock_bh(&trans_pcie->alloc_page_lock);
}
/* Alloc a new receive buffer */ /* Alloc a new receive buffer */
page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
if (!page) { if (!page) {
...@@ -436,6 +456,18 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, ...@@ -436,6 +456,18 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
"Failed to alloc_pages\n"); "Failed to alloc_pages\n");
return NULL; return NULL;
} }
if (2 * rbsize <= allocsize) {
spin_lock_bh(&trans_pcie->alloc_page_lock);
if (!trans_pcie->alloc_page) {
get_page(page);
trans_pcie->alloc_page = page;
trans_pcie->alloc_page_used = rbsize;
}
spin_unlock_bh(&trans_pcie->alloc_page_lock);
}
*offset = 0;
return page; return page;
} }
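The interesting part of the reworked allocator is the alloc_page/alloc_page_used pair: when the page allocation is at least twice the RB size, the remainder of the page is kept (with an extra page reference) and handed to the next caller at a non-zero offset, instead of burning a whole 4K page per 2K buffer. A simplified standalone sketch of the carving logic; real locking and get_page()/put_page() reference counting are left out, and the sizes are assumptions:

#include <stdio.h>
#include <stdlib.h>

#define ALLOC_SIZE 4096u        /* PAGE_SIZE << rx_page_order, assumed */
#define RB_SIZE    2048u        /* 2K receive buffers, assumed */

static unsigned char *cur_page;         /* partially used page, if any */
static unsigned int cur_used;

static unsigned char *alloc_rb(unsigned int *offset)
{
    unsigned char *page;

    if (cur_page) {                     /* carve the next chunk */
        *offset = cur_used;
        page = cur_page;
        cur_used += RB_SIZE;
        if (cur_used >= ALLOC_SIZE)
            cur_page = NULL;            /* page fully handed out */
        return page;
    }

    page = malloc(ALLOC_SIZE);          /* stand-in for alloc_pages() */
    if (!page)
        return NULL;
    *offset = 0;
    if (2 * RB_SIZE <= ALLOC_SIZE) {    /* room left for more RBs */
        cur_page = page;
        cur_used = RB_SIZE;
    }
    return page;
}

int main(void)
{
    /* pages are intentionally never freed in this toy */
    for (int i = 0; i < 4; i++) {
        unsigned int off;
        unsigned char *page = alloc_rb(&off);

        printf("RB %d: page %p offset %u\n", i, (void *)page, off);
    }
    return 0;
}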
...@@ -456,6 +488,8 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, ...@@ -456,6 +488,8 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
struct page *page; struct page *page;
while (1) { while (1) {
unsigned int offset;
spin_lock(&rxq->lock); spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) { if (list_empty(&rxq->rx_used)) {
spin_unlock(&rxq->lock); spin_unlock(&rxq->lock);
...@@ -463,8 +497,7 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, ...@@ -463,8 +497,7 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
} }
spin_unlock(&rxq->lock); spin_unlock(&rxq->lock);
/* Alloc a new receive buffer */ page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
page = iwl_pcie_rx_alloc_page(trans, priority);
if (!page) if (!page)
return; return;
...@@ -482,10 +515,11 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, ...@@ -482,10 +515,11 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
BUG_ON(rxb->page); BUG_ON(rxb->page);
rxb->page = page; rxb->page = page;
rxb->offset = offset;
/* Get physical address of the RB */ /* Get physical address of the RB */
rxb->page_dma = rxb->page_dma =
dma_map_page(trans->dev, page, 0, dma_map_page(trans->dev, page, rxb->offset,
PAGE_SIZE << trans_pcie->rx_page_order, trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) { if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL; rxb->page = NULL;
...@@ -510,12 +544,11 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) ...@@ -510,12 +544,11 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i; int i;
for (i = 0; i < RX_POOL_SIZE; i++) { for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
if (!trans_pcie->rx_pool[i].page) if (!trans_pcie->rx_pool[i].page)
continue; continue;
dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
PAGE_SIZE << trans_pcie->rx_page_order, trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
DMA_FROM_DEVICE);
__free_pages(trans_pcie->rx_pool[i].page, __free_pages(trans_pcie->rx_pool[i].page,
trans_pcie->rx_page_order); trans_pcie->rx_page_order);
trans_pcie->rx_pool[i].page = NULL; trans_pcie->rx_pool[i].page = NULL;
...@@ -568,15 +601,17 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) ...@@ -568,15 +601,17 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
BUG_ON(rxb->page); BUG_ON(rxb->page);
/* Alloc a new receive buffer */ /* Alloc a new receive buffer */
page = iwl_pcie_rx_alloc_page(trans, gfp_mask); page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
gfp_mask);
if (!page) if (!page)
continue; continue;
rxb->page = page; rxb->page = page;
/* Get physical address of the RB */ /* Get physical address of the RB */
rxb->page_dma = dma_map_page(trans->dev, page, 0, rxb->page_dma = dma_map_page(trans->dev, page,
PAGE_SIZE << trans_pcie->rx_page_order, rxb->offset,
DMA_FROM_DEVICE); trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) { if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL; rxb->page = NULL;
__free_pages(page, trans_pcie->rx_page_order); __free_pages(page, trans_pcie->rx_page_order);
...@@ -738,7 +773,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, ...@@ -738,7 +773,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
spin_lock_init(&rxq->lock); spin_lock_init(&rxq->lock);
if (trans->trans_cfg->mq_rx_supported) if (trans->trans_cfg->mq_rx_supported)
rxq->queue_size = MQ_RX_TABLE_SIZE; rxq->queue_size = trans->cfg->num_rbds;
else else
rxq->queue_size = RX_QUEUE_SIZE; rxq->queue_size = RX_QUEUE_SIZE;
...@@ -807,8 +842,18 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans) ...@@ -807,8 +842,18 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
GFP_KERNEL); GFP_KERNEL);
if (!trans_pcie->rxq) trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
return -ENOMEM; sizeof(trans_pcie->rx_pool[0]),
GFP_KERNEL);
trans_pcie->global_table =
kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
sizeof(trans_pcie->global_table[0]),
GFP_KERNEL);
if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
!trans_pcie->global_table) {
ret = -ENOMEM;
goto err;
}
spin_lock_init(&rba->lock); spin_lock_init(&rba->lock);
...@@ -845,6 +890,8 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans) ...@@ -845,6 +890,8 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
trans_pcie->base_rb_stts = NULL; trans_pcie->base_rb_stts = NULL;
trans_pcie->base_rb_stts_dma = 0; trans_pcie->base_rb_stts_dma = 0;
} }
kfree(trans_pcie->rx_pool);
kfree(trans_pcie->global_table);
kfree(trans_pcie->rxq); kfree(trans_pcie->rxq);
return ret; return ret;
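Because the pool and the VID lookup table are now sized from the runtime RBD count, both become kcalloc() allocations that share one error path with the rxq array (and are likewise freed in iwl_pcie_rx_free() below). A stripped-down standalone sketch of that allocation shape, with placeholder element sizes:

#include <stdlib.h>

struct rxb { void *page; };

static int rx_alloc(unsigned int num_rx_bufs, unsigned int num_rx_queues)
{
    /* mirrors RX_POOL_SIZE(); the constants are assumptions */
    unsigned int pool_size = num_rx_bufs - 1 + 16 * (8 - 4);
    void *rxq = calloc(num_rx_queues, 64);              /* placeholder size */
    struct rxb *rx_pool = calloc(pool_size, sizeof(*rx_pool));
    struct rxb **global_table = calloc(pool_size, sizeof(*global_table));

    if (!rxq || !rx_pool || !global_table)
        goto err;
    /* ...rest of the RX setup would go here; the driver stores the
     * pointers in trans_pcie, this toy simply leaks them on success... */
    return 0;

err:
    free(global_table);
    free(rx_pool);
    free(rxq);
    return -1;      /* -ENOMEM in the driver */
}

int main(void)
{
    return rx_alloc(512, 1) ? 1 : 0;
}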
...@@ -1081,12 +1128,11 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) ...@@ -1081,12 +1128,11 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
/* move the pool to the default queue and allocator ownerships */ /* move the pool to the default queue and allocator ownerships */
queue_size = trans->trans_cfg->mq_rx_supported ? queue_size = trans->trans_cfg->mq_rx_supported ?
MQ_RX_NUM_RBDS : RX_QUEUE_SIZE; trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
allocator_pool_size = trans->num_rx_queues * allocator_pool_size = trans->num_rx_queues *
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
num_alloc = queue_size + allocator_pool_size; num_alloc = queue_size + allocator_pool_size;
BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
ARRAY_SIZE(trans_pcie->rx_pool));
for (i = 0; i < num_alloc; i++) { for (i = 0; i < num_alloc; i++) {
struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
...@@ -1177,7 +1223,12 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) ...@@ -1177,7 +1223,12 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
if (rxq->napi.poll) if (rxq->napi.poll)
netif_napi_del(&rxq->napi); netif_napi_del(&rxq->napi);
} }
kfree(trans_pcie->rx_pool);
kfree(trans_pcie->global_table);
kfree(trans_pcie->rxq); kfree(trans_pcie->rxq);
if (trans_pcie->alloc_page)
__free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
} }
static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
...@@ -1235,7 +1286,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, ...@@ -1235,7 +1286,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
bool page_stolen = false; bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order; int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0; u32 offset = 0;
if (WARN_ON(!rxb)) if (WARN_ON(!rxb))
...@@ -1249,7 +1300,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, ...@@ -1249,7 +1300,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
bool reclaim; bool reclaim;
int index, cmd_index, len; int index, cmd_index, len;
struct iwl_rx_cmd_buffer rxcb = { struct iwl_rx_cmd_buffer rxcb = {
._offset = offset, ._offset = rxb->offset + offset,
._rx_page_order = trans_pcie->rx_page_order, ._rx_page_order = trans_pcie->rx_page_order,
._page = rxb->page, ._page = rxb->page,
._page_stolen = false, ._page_stolen = false,
...@@ -1355,8 +1406,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, ...@@ -1355,8 +1406,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
* rx_free list for reuse later. */ * rx_free list for reuse later. */
if (rxb->page != NULL) { if (rxb->page != NULL) {
rxb->page_dma = rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0, dma_map_page(trans->dev, rxb->page, rxb->offset,
PAGE_SIZE << trans_pcie->rx_page_order, trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) { if (dma_mapping_error(trans->dev, rxb->page_dma)) {
/* /*
...@@ -1390,13 +1441,12 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, ...@@ -1390,13 +1441,12 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb; return rxb;
} }
/* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF; vid = le16_to_cpu(rxq->cd[i].rbid);
else else
vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table)) if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
goto out_err; goto out_err;
rxb = trans_pcie->global_table[vid - 1]; rxb = trans_pcie->global_table[vid - 1];
......
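The VID lookup keeps the 12-bit mask only for pre-AX210 descriptors (AX210 exposes a full 16-bit rbid), and the upper bound is now the dynamically sized pool rather than a fixed ARRAY_SIZE(). A standalone sketch of the extraction and validation, with a hypothetical pool size:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool get_vid(bool ax210, uint32_t bd_word, uint16_t rbid,
                    unsigned int pool_size, unsigned int *vid)
{
    *vid = ax210 ? rbid : (bd_word & 0x0FFF);   /* 12-bit VID on older devices */
    /* VIDs are 1-based; 0 and anything past the pool are invalid */
    return *vid != 0 && *vid <= pool_size;
}

int main(void)
{
    unsigned int vid;
    unsigned int pool_size = 575;       /* e.g. RX_POOL_SIZE(512) */

    printf("legacy ok: %d (vid %u)\n",
           get_vid(false, 0xABCD0123, 0, pool_size, &vid), vid);
    printf("ax210 ok:  %d (vid %u)\n",
           get_vid(true, 0, 550, pool_size, &vid), vid);
    printf("bad vid:   %d (vid %u)\n",
           get_vid(false, 0x00000FFF, 0, pool_size, &vid), vid);
    return 0;
}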
...@@ -132,8 +132,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) ...@@ -132,8 +132,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from * Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state. * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/ */
iwl_clear_bit(trans, CSR_GP_CNTRL, iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
BIT(trans->trans_cfg->csr->flag_init_done));
} }
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
...@@ -175,7 +174,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) ...@@ -175,7 +174,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */ /* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL, iwl_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req)); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */ /* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false); iwl_pcie_gen2_apm_stop(trans, false);
......
...@@ -183,8 +183,7 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans) ...@@ -183,8 +183,7 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans) static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{ {
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */ /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
iwl_set_bit(trans, trans->trans_cfg->csr->addr_sw_reset, iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
BIT(trans->trans_cfg->csr->flag_sw_reset));
usleep_range(5000, 6000); usleep_range(5000, 6000);
} }
...@@ -487,8 +486,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) ...@@ -487,8 +486,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* Clear "initialization complete" bit to move adapter from * Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state. * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/ */
iwl_clear_bit(trans, CSR_GP_CNTRL, iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
BIT(trans->trans_cfg->csr->flag_init_done));
/* Activates XTAL resources monitor */ /* Activates XTAL resources monitor */
__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
...@@ -510,12 +508,11 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans) ...@@ -510,12 +508,11 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
int ret; int ret;
/* stop device's busmaster DMA activity */ /* stop device's busmaster DMA activity */
iwl_set_bit(trans, trans->trans_cfg->csr->addr_sw_reset, iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
BIT(trans->trans_cfg->csr->flag_stop_master));
ret = iwl_poll_bit(trans, trans->trans_cfg->csr->addr_sw_reset, ret = iwl_poll_bit(trans, CSR_RESET,
BIT(trans->trans_cfg->csr->flag_master_dis), CSR_RESET_REG_FLAG_MASTER_DISABLED,
BIT(trans->trans_cfg->csr->flag_master_dis), 100); CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
if (ret < 0) if (ret < 0)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
...@@ -564,8 +561,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) ...@@ -564,8 +561,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from * Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state. * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/ */
iwl_clear_bit(trans, CSR_GP_CNTRL, iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
BIT(trans->trans_cfg->csr->flag_init_done));
} }
static int iwl_pcie_nic_init(struct iwl_trans *trans) static int iwl_pcie_nic_init(struct iwl_trans *trans)
...@@ -1270,7 +1266,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) ...@@ -1270,7 +1266,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */ /* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL, iwl_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req)); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */ /* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false); iwl_pcie_apm_stop(trans, false);
...@@ -1494,9 +1490,8 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, ...@@ -1494,9 +1490,8 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_synchronize_irqs(trans); iwl_pcie_synchronize_irqs(trans);
iwl_clear_bit(trans, CSR_GP_CNTRL, iwl_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req)); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_clear_bit(trans, CSR_GP_CNTRL, iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
BIT(trans->trans_cfg->csr->flag_init_done));
if (reset) { if (reset) {
/* /*
...@@ -1561,7 +1556,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, ...@@ -1561,7 +1556,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
} }
iwl_set_bit(trans, CSR_GP_CNTRL, iwl_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req)); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_finish_nic_init(trans, trans->trans_cfg); ret = iwl_finish_nic_init(trans, trans->trans_cfg);
if (ret) if (ret)
...@@ -1583,7 +1578,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, ...@@ -1583,7 +1578,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
if (!reset) { if (!reset) {
iwl_clear_bit(trans, CSR_GP_CNTRL, iwl_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req)); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
} else { } else {
iwl_trans_pcie_tx_reset(trans); iwl_trans_pcie_tx_reset(trans);
...@@ -1945,6 +1940,11 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, ...@@ -1945,6 +1940,11 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
trans_pcie->rx_page_order = trans_pcie->rx_page_order =
iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
trans_pcie->rx_buf_bytes =
iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active; trans_pcie->scd_set_active = trans_cfg->scd_set_active;
...@@ -2054,7 +2054,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, ...@@ -2054,7 +2054,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
/* this bit wakes up the NIC */ /* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req)); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2); udelay(2);
...@@ -2079,8 +2079,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, ...@@ -2079,8 +2079,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
* and do not save/restore SRAM when power cycling. * and do not save/restore SRAM when power cycling.
*/ */
ret = iwl_poll_bit(trans, CSR_GP_CNTRL, ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_val_mac_access_en), CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
(BIT(trans->trans_cfg->csr->flag_mac_clock_ready) | (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (unlikely(ret < 0)) { if (unlikely(ret < 0)) {
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
...@@ -2162,7 +2162,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, ...@@ -2162,7 +2162,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
goto out; goto out;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req)); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* /*
* Above we read the CSR_GP_CNTRL register, which will flush * Above we read the CSR_GP_CNTRL register, which will flush
* any previous writes, but we need the write that clears the * any previous writes, but we need the write that clears the
...@@ -2963,7 +2963,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, ...@@ -2963,7 +2963,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
int allocated_rb_nums) int allocated_rb_nums)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int max_len = PAGE_SIZE << trans_pcie->rx_page_order; int max_len = trans_pcie->rx_buf_bytes;
/* Dump RBs is supported only for pre-9000 devices (1 queue) */ /* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0]; struct iwl_rxq *rxq = &trans_pcie->rxq[0];
u32 i, r, j, rb_len = 0; u32 i, r, j, rb_len = 0;
...@@ -2989,9 +2989,9 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, ...@@ -2989,9 +2989,9 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
rb->index = cpu_to_le32(i); rb->index = cpu_to_le32(i);
memcpy(rb->data, page_address(rxb->page), max_len); memcpy(rb->data, page_address(rxb->page), max_len);
/* remap the page for the free benefit */ /* remap the page for the free benefit */
rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0, rxb->page_dma = dma_map_page(trans->dev, rxb->page,
max_len, rxb->offset, max_len,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
*data = iwl_fw_error_next_data(*data); *data = iwl_fw_error_next_data(*data);
} }
...@@ -3482,6 +3482,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, ...@@ -3482,6 +3482,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->opmode_down = true; trans_pcie->opmode_down = true;
spin_lock_init(&trans_pcie->irq_lock); spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock); spin_lock_init(&trans_pcie->reg_lock);
spin_lock_init(&trans_pcie->alloc_page_lock);
mutex_init(&trans_pcie->mutex); mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq); init_waitqueue_head(&trans_pcie->ucode_write_waitq);
......
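The `iwl_trans_pcie_configure()` hunk above records the receive-buffer size in bytes and a per-family DMA mask (`DMA_BIT_MASK(12)`, narrowed to `DMA_BIT_MASK(11)` from AX210 on), which the RB-dump path then uses instead of `PAGE_SIZE << rx_page_order`. A minimal sketch of what such a mask means, low n bits set, follows; `TOY_DMA_BIT_MASK` and the sample offset are illustrative stand-ins, not kernel API.

/* Sketch only: user-space stand-in for DMA_BIT_MASK(); helper names are mine. */
#include <stdint.h>
#include <stdio.h>

/* Same expansion the kernel macro uses: low n bits set (all bits for n == 64). */
#define TOY_DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t mask_wide   = TOY_DMA_BIT_MASK(12);	/* 0xfff */
	uint64_t mask_narrow = TOY_DMA_BIT_MASK(11);	/* 0x7ff */

	/* A buffer offset is only usable if it fits inside the supported mask. */
	uint64_t offset = 0x900;

	printf("offset 0x%llx ok for 12-bit mask: %d\n",
	       (unsigned long long)offset, (offset & ~mask_wide) == 0);
	printf("offset 0x%llx ok for 11-bit mask: %d\n",
	       (unsigned long long)offset, (offset & ~mask_narrow) == 0);
	return 0;
}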
...@@ -587,6 +587,10 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, ...@@ -587,6 +587,10 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
int idx; int idx;
void *tfd; void *tfd;
if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
"queue %d out of range", txq_id))
return -EINVAL;
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id)) "TX on unused queue %d\n", txq_id))
return -EINVAL; return -EINVAL;
...@@ -1101,9 +1105,15 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, ...@@ -1101,9 +1105,15 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id]; struct iwl_txq *txq;
int i; int i;
if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
"queue %d out of range", txq_id))
return;
txq = trans_pcie->txq[txq_id];
if (WARN_ON(!txq)) if (WARN_ON(!txq))
return; return;
...@@ -1258,6 +1268,10 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) ...@@ -1258,6 +1268,10 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
"queue %d out of range", queue))
return;
/* /*
* Upon HW Rfkill - we stop the device, and then stop the queues * Upon HW Rfkill - we stop the device, and then stop the queues
* in the op_mode. Just for the sake of the simplicity of the op_mode, * in the op_mode. Just for the sake of the simplicity of the op_mode,
......
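The tx-gen2.c hunks above add a range check on the queue index against `IWL_MAX_TVQM_QUEUES` before the queue-used bitmap or the queue array is touched, warning once and bailing out on a bad index. A minimal user-space sketch of that guard pattern, with made-up names, a toy WARN_ONCE stand-in, and an arbitrary queue count:

/* Sketch only: "warn once and bail out" before indexing a queue array. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_QUEUES 512	/* placeholder, not IWL_MAX_TVQM_QUEUES */

static void *toy_txq[TOY_MAX_QUEUES];

/* Crude stand-in for WARN_ONCE(): report only the first violation. */
static bool toy_warn_once_out_of_range(int txq_id)
{
	static bool warned;
	bool bad = txq_id < 0 || txq_id >= TOY_MAX_QUEUES;

	if (bad && !warned) {
		warned = true;
		fprintf(stderr, "WARN: queue %d out of range\n", txq_id);
	}
	return bad;
}

static int toy_txq_free(int txq_id)
{
	/* Range check first, so a bad id never reaches the array below. */
	if (toy_warn_once_out_of_range(txq_id))
		return -1;

	if (!toy_txq[txq_id])	/* nothing allocated, nothing to do */
		return 0;

	toy_txq[txq_id] = NULL;
	return 0;
}

int main(void)
{
	toy_txq_free(5);	/* in range: proceeds normally */
	toy_txq_free(9999);	/* out of range: warns and returns early */
	return 0;
}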
...@@ -306,7 +306,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, ...@@ -306,7 +306,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
txq_id, reg); txq_id, reg);
iwl_set_bit(trans, CSR_GP_CNTRL, iwl_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req)); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
txq->need_update = true; txq->need_update = true;
return; return;
} }
...@@ -646,7 +646,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) ...@@ -646,7 +646,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
trans_pcie->cmd_hold_nic_awake = false; trans_pcie->cmd_hold_nic_awake = false;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req)); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
} }
/* /*
...@@ -1255,16 +1255,16 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, ...@@ -1255,16 +1255,16 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
if (trans->trans_cfg->base_params->apmg_wake_up_wa && if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
!trans_pcie->cmd_hold_nic_awake) { !trans_pcie->cmd_hold_nic_awake) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req)); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL, ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_val_mac_access_en), CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
(BIT(trans->trans_cfg->csr->flag_mac_clock_ready) | (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
15000); 15000);
if (ret < 0) { if (ret < 0) {
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->trans_cfg->csr->flag_mac_access_req)); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
return -EIO; return -EIO;
} }
......
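The tx.c hunks above keep the established wake sequence, only expressed with the shared `CSR_GP_CNTRL_*` constants: set the MAC access request bit, poll for the clock-ready bit with a timeout, and clear the request bit on failure or once the command is done. A minimal sketch of that request-then-poll shape, using placeholder bit values and a toy "hardware" that answers immediately, purely for illustration:

/* Sketch only: request access, poll a ready bit with a budget, release. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_FLAG_ACCESS_REQ	(1u << 3)	/* placeholder */
#define TOY_FLAG_CLOCK_READY	(1u << 0)	/* placeholder */

static uint32_t toy_gp_cntrl;

/* In this toy model the "hardware" grants the clock as soon as it is asked. */
static uint32_t toy_read(void)
{
	if (toy_gp_cntrl & TOY_FLAG_ACCESS_REQ)
		toy_gp_cntrl |= TOY_FLAG_CLOCK_READY;
	return toy_gp_cntrl;
}

/* Poll until (reg & mask) == bits or the loop budget runs out. */
static int toy_poll_bit(uint32_t bits, uint32_t mask, int loops)
{
	while (loops-- > 0) {
		if ((toy_read() & mask) == bits)
			return 0;
	}
	return -1;	/* timed out */
}

int main(void)
{
	toy_gp_cntrl |= TOY_FLAG_ACCESS_REQ;		/* request access */

	if (toy_poll_bit(TOY_FLAG_CLOCK_READY, TOY_FLAG_CLOCK_READY, 100))
		fprintf(stderr, "wake-up timed out\n");
	else
		printf("NIC awake, GP_CNTRL = 0x%08x\n", (unsigned)toy_gp_cntrl);

	toy_gp_cntrl &= ~TOY_FLAG_ACCESS_REQ;		/* release when done */
	return 0;
}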