Commit ef16ea32 authored by John W. Linville

Merge tag 'iwlwifi-next-for-john-2014-11-24' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Emmanuel Grumbach <egrumbach@gmail.com> says:

"Major works are CSA and TDLS. On top of that I have a new
firmware API for scan and a few rate control improvements.
Johannes found a few tricks to improve our CPU utilization
and added support for a new spin of 7265 called 7265D.
Along with this come a few random things that don't stand out."
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parents 9e6f3f47 dcad8e42
@@ -59,7 +59,7 @@ config IWLDVM
config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
-select BACKPORT_WANT_DEV_COREDUMP
+select WANT_DEV_COREDUMP
help
This is the driver that supports the MVM firmware which is
currently only available for 7260 and 3160 devices.
......
@@ -102,6 +102,9 @@
#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
#define IWL7265D_FW_PRE "iwlwifi-7265D-"
#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_7000 0
static const struct iwl_base_params iwl7000_base_params = {
@@ -132,8 +135,8 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.base_params = &iwl7000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \
-.non_shared_ant = ANT_A
+.non_shared_ant = ANT_A, \
+.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl7260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7260",
@@ -267,7 +270,38 @@ const struct iwl_cfg iwl7265_n_cfg = {
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265d_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7265",
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265d_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 7265",
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265d_n_cfg = {
.name = "Intel(R) Wireless N 7265",
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
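/*
 * Editor's note (not part of the original patch): the MODULE_FIRMWARE lines
 * above only register the file names with modinfo.  The name itself comes
 * from the *_MODULE_FIRMWARE() macros; a minimal sketch of the expansion,
 * assuming a ucode API version of 10 purely for illustration:
 */
#include <linux/stringify.h>

#define EXAMPLE_IWL7265D_FW_PRE "iwlwifi-7265D-"
#define EXAMPLE_IWL7265D_MODULE_FIRMWARE(api) \
	EXAMPLE_IWL7265D_FW_PRE __stringify(api) ".ucode"

/*
 * EXAMPLE_IWL7265D_MODULE_FIRMWARE(10) pastes three string literals,
 * "iwlwifi-7265D-" "10" ".ucode", into the single name
 * "iwlwifi-7265D-10.ucode" that request_firmware() will ask userspace for.
 */
static const char *example_7265d_fw_name = EXAMPLE_IWL7265D_MODULE_FIRMWARE(10);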
@@ -91,6 +91,10 @@
/* Max SDIO RX aggregation size of the ADDBA request/response */
#define MAX_RX_AGG_SIZE_8260_SDIO 28
/* Max A-MPDU exponent for HT and VHT */
#define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K
#define MAX_VHT_AMPDU_EXPONENT_8260_SDIO IEEE80211_VHT_MAX_AMPDU_32K
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -119,6 +123,7 @@ static const struct iwl_ht_params iwl8000_ht_params = {
.base_params = &iwl8000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
.d0i3 = true, \
.non_shared_ant = ANT_A
const struct iwl_cfg iwl8260_2n_cfg = {
@@ -137,6 +142,7 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
@@ -149,6 +155,23 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};
const struct iwl_cfg iwl4265_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 4265",
.fw_name_pre = IWL8000_FW_PRE,
IWL_DEVICE_8000,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.bt_shared_single_ant = true,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
@@ -257,6 +257,10 @@ struct iwl_pwr_tx_backoff {
* @pwr_tx_backoffs: translation table between power limits and backoffs
* @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
* @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
* @max_ht_ampdu_exponent: the exponent of the max length of A-MPDU that the
* station can receive in HT
* @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the
* station can receive in VHT
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -297,6 +301,8 @@ struct iwl_cfg {
unsigned int max_rx_agg_size;
bool disable_dummy_notification;
unsigned int max_tx_agg_size;
unsigned int max_ht_ampdu_exponent;
unsigned int max_vht_ampdu_exponent;
};
/*
@@ -358,9 +364,13 @@ extern const struct iwl_cfg iwl3165_2ac_cfg;
extern const struct iwl_cfg iwl7265_2ac_cfg;
extern const struct iwl_cfg iwl7265_2n_cfg;
extern const struct iwl_cfg iwl7265_n_cfg;
extern const struct iwl_cfg iwl7265d_2ac_cfg;
extern const struct iwl_cfg iwl7265d_2n_cfg;
extern const struct iwl_cfg iwl7265d_n_cfg;
extern const struct iwl_cfg iwl8260_2n_cfg;
extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4265_2ac_sdio_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
@@ -129,6 +129,8 @@
#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
#define CSR_MBOX_SET_REG (CSR_BASE + 0x88)
#define CSR_LED_REG (CSR_BASE+0x094)
#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE+0x0A8) /* 6000 and up */
@@ -184,6 +186,8 @@
#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
#define CSR_MBOX_SET_REG_OS_ALIVE BIT(5)
#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
@@ -321,6 +325,7 @@ enum {
#define CSR_HW_REV_TYPE_2x00 (0x0000100)
#define CSR_HW_REV_TYPE_105 (0x0000110)
#define CSR_HW_REV_TYPE_135 (0x0000120)
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
/* EEPROM REG */
......
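/*
 * Editor's sketch (assumption, not shown in this diff): CSR_HW_REV_TYPE_7265D
 * lets the driver tell the 7265D spin apart from the original 7265 at probe
 * time and substitute the matching config added above.  The helper below is
 * illustrative; the real selection happens in the PCIe probe path and may
 * differ in detail.
 */
static const struct iwl_cfg *
example_pick_7265_cfg(u32 hw_rev, const struct iwl_cfg *pci_id_cfg)
{
	if ((hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
		return &iwl7265d_2ac_cfg;	/* new 7265D spin */

	return pci_id_cfg;			/* whatever the PCI ID table chose */
}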
@@ -143,7 +143,7 @@ do { \
#define IWL_DL_INFO 0x00000001
#define IWL_DL_MAC80211 0x00000002
#define IWL_DL_HCMD 0x00000004
-#define IWL_DL_STATE 0x00000008
+#define IWL_DL_TDLS 0x00000008
/* 0x000000F0 - 0x00000010 */
#define IWL_DL_QUOTA 0x00000010
#define IWL_DL_TE 0x00000020
@@ -180,6 +180,7 @@ do { \
#define IWL_DL_TX_QUEUES 0x80000000
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_TDLS(p, f, a...) IWL_DEBUG(p, IWL_DL_TDLS, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
......
@@ -764,7 +764,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
if (iwlwifi_mod_params.amsdu_size_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
ht_info->mcs.rx_mask[0] = 0xFF;
......
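/*
 * Editor's aside: ampdu_factor is the HT "maximum A-MPDU length exponent",
 * where the advertised limit is 2^(13 + factor) - 1 octets.  So the old
 * hard-coded IEEE80211_HT_MAX_AMPDU_64K (factor 3) meant 65535 bytes, and a
 * config that set max_ht_ampdu_exponent to IEEE80211_HT_MAX_AMPDU_8K
 * (factor 0) would advertise 8191 bytes.  A tiny illustrative helper:
 */
static inline u32 example_ht_max_ampdu_len(u8 ampdu_factor)
{
	return (1U << (13 + ampdu_factor)) - 1;	/* 3 -> 65535, 0 -> 8191 */
}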
@@ -145,24 +145,30 @@ enum iwl_ucode_tlv_api {
/**
* enum iwl_ucode_tlv_capa - ucode capabilities
* @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
* @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
* @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
* @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
* tx power value into TPC Report action frame and Link Measurement Report
* action frame
-* @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports adding DS params
-* element in probe requests.
+* @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current
+* channel in DS parameter set element in probe requests.
* @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
* probe requests.
* @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
* @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
* which also implies support for the scheduler configuration command
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2),
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6),
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13),
};
/* The default calibrate table size if not specified by firmware file */
......
@@ -325,6 +325,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
{
int num_rx_ants = num_of_ant(rx_chains);
int num_tx_ants = num_of_ant(tx_chains);
unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
IEEE80211_VHT_MAX_AMPDU_1024K);
vht_cap->vht_supported = true;
@@ -332,7 +334,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
-7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+max_ampdu_exponent <<
+IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
if (cfg->ht_params->ldpc)
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
......
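/*
 * Editor's aside on the "?:" above: it is the GNU C shorthand where
 * "a ?: b" yields a when a is non-zero and b otherwise.  Configs that leave
 * max_vht_ampdu_exponent at 0 therefore get IEEE80211_VHT_MAX_AMPDU_1024K
 * (exponent 7, i.e. 2^(13+7) - 1 = 1048575 bytes), while the 8260 SDIO
 * configs keep their 32K cap (exponent 2, 32767 bytes).  A standalone,
 * purely illustrative demo of the operator:
 */
#include <stdio.h>

int main(void)
{
	unsigned int cfg_exponent = 0;		/* "not set" in the config */
	unsigned int used = cfg_exponent ?: 7;	/* falls back to the 1024K exponent */

	printf("exponent %u -> max A-MPDU %u bytes\n",
	       used, (1U << (13 + used)) - 1);
	return 0;
}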
@@ -138,7 +138,8 @@ struct iwl_cfg;
* @nic_config: configure NIC, called before firmware is started.
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
-* @enter_d0i3: configure the fw to enter d0i3. May sleep.
+* @enter_d0i3: configure the fw to enter d0i3. return 1 to indicate d0i3
+* entrance is aborted (e.g. due to held reference). May sleep.
* @exit_d0i3: configure the fw to exit d0i3. May sleep.
*/
struct iwl_op_mode_ops {
......
@@ -349,10 +349,10 @@ enum secure_load_status_reg {
#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
-#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
-#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
+#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x404000)
+#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x405000)
-#define LMPM_SECURE_TIME_OUT (100)
+#define LMPM_SECURE_TIME_OUT (50000) /* 5 msec */
/* Rx FIFO */
#define RXF_SIZE_ADDR (0xa00c88)
@@ -368,4 +368,10 @@ enum secure_load_status_reg {
#define MON_BUFF_WRPTR (0xa03c44)
#define MON_BUFF_CYCLE_CNT (0xa03c48)
/* FW chicken bits */
#define LMPM_CHICK 0xA01FF8
enum {
LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
};
#endif /* __iwl_prph_h__ */
@@ -534,6 +534,8 @@ struct iwl_trans_ops {
u32 value);
void (*ref)(struct iwl_trans *trans);
void (*unref)(struct iwl_trans *trans);
void (*suspend)(struct iwl_trans *trans);
void (*resume)(struct iwl_trans *trans);
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
};
@@ -702,6 +704,18 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
trans->ops->unref(trans);
}
static inline void iwl_trans_suspend(struct iwl_trans *trans)
{
if (trans->ops->suspend)
trans->ops->suspend(trans);
}
static inline void iwl_trans_resume(struct iwl_trans *trans)
{
if (trans->ops->resume)
trans->ops->resume(trans);
}
static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans)
{
......
@@ -1137,6 +1137,22 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
return lut_type != BT_COEX_LOOSE_LUT;
}
bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
{
/* there is no other antenna, shared antenna is always available */
if (mvm->cfg->bt_shared_single_ant)
return true;
if (ant & mvm->cfg->non_shared_ant)
return true;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
BT_HIGH_TRAFFIC;
}
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
{
/* there is no other antenna, shared antenna is always available */
......
@@ -612,7 +612,9 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
BT_VALID_ANT_ISOLATION_THRS |
BT_VALID_TXTX_DELTA_FREQ_THRS |
BT_VALID_TXRX_MAX_FREQ_0 |
-BT_VALID_SYNC_TO_SCO);
+BT_VALID_SYNC_TO_SCO |
+BT_VALID_TTC |
+BT_VALID_RRC);
if (IWL_MVM_BT_COEX_SYNC2SCO)
bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
@@ -628,6 +630,12 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
}
if (IWL_MVM_BT_COEX_TTC)
bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
if (IWL_MVM_BT_COEX_RRC)
bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
sizeof(iwl_single_shared_ant));
@@ -824,6 +832,9 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
if (!vif->bss_conf.assoc)
smps_mode = IEEE80211_SMPS_AUTOMATIC;
if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
smps_mode = IEEE80211_SMPS_AUTOMATIC;
IWL_DEBUG_COEX(data->mvm,
"mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
mvmvif->id, data->notif->bt_status, bt_activity_grading,
@@ -1156,6 +1167,12 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
return lut_type != BT_COEX_LOOSE_LUT;
}
bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant)
{
u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
return ag < BT_HIGH_TRAFFIC;
}
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
{
u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
......
@@ -92,6 +92,8 @@
#define IWL_MVM_BT_COEX_SYNC2SCO 1
#define IWL_MVM_BT_COEX_CORUNNING 0
#define IWL_MVM_BT_COEX_MPLUT 1
#define IWL_MVM_BT_COEX_RRC 1
#define IWL_MVM_BT_COEX_TTC 1
#define IWL_MVM_BT_COEX_MPLUT_REG0 0x2e402280
#define IWL_MVM_BT_COEX_MPLUT_REG1 0x7711a751
#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30
......
@@ -878,6 +878,10 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
};
int ret;
ret = iwl_mvm_switch_to_d3(mvm);
if (ret)
return ret;
ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
if (ret)
return ret;
@@ -962,6 +966,33 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
return ret;
}
static int
iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
struct cfg80211_wowlan *wowlan,
struct cfg80211_sched_scan_request *nd_config,
struct ieee80211_vif *vif)
{
struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
int ret;
ret = iwl_mvm_switch_to_d3(mvm);
if (ret)
return ret;
/* rfkill release can be either for wowlan or netdetect */
if (wowlan->rfkill_release)
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
ret = iwl_mvm_send_wowlan_config_cmd(mvm, &wowlan_config_cmd);
if (ret)
return ret;
ret = iwl_mvm_scan_offload_start(mvm, vif, nd_config, &mvm->nd_ies);
return ret;
}
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan,
bool test)
@@ -970,7 +1001,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = NULL;
struct iwl_mvm_vif *mvmvif = NULL;
struct ieee80211_sta *ap_sta = NULL;
-struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
struct iwl_d3_manager_config d3_cfg_cmd_data = {
/*
* Program the minimum sleep time to 10 seconds, as many
@@ -1007,8 +1037,22 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
-/* if we're associated, this is wowlan */
-if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
+if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
+/* if we're not associated, this must be netdetect */
+if (!wowlan->nd_config && !mvm->nd_config) {
+ret = 1;
+goto out_noreset;
+}
+ret = iwl_mvm_netdetect_config(
+mvm, wowlan, wowlan->nd_config ?: mvm->nd_config, vif);
+if (ret)
+goto out;
+mvm->net_detect = true;
+} else {
+struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
ap_sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
lockdep_is_held(&mvm->mutex));
@@ -1021,27 +1065,12 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
vif, mvmvif, ap_sta);
if (ret)
goto out_noreset;
-ret = iwl_mvm_switch_to_d3(mvm);
-if (ret)
-goto out;
ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
vif, mvmvif, ap_sta);
if (ret)
goto out;
-} else if (mvm->nd_config) {
-ret = iwl_mvm_switch_to_d3(mvm);
-if (ret)
-goto out;
-ret = iwl_mvm_scan_offload_start(mvm, vif, mvm->nd_config,
-mvm->nd_ies);
-if (ret)
-goto out;
-} else {
-ret = 1;
-goto out_noreset;
+mvm->net_detect = false;
}
ret = iwl_mvm_power_update_device(mvm);
@@ -1087,6 +1116,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
iwl_trans_suspend(mvm->trans);
if (iwl_mvm_is_d0i3_supported(mvm)) {
mutex_lock(&mvm->d0i3_suspend_mutex);
__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
@@ -1465,9 +1495,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
return true;
}
-/* releases the MVM mutex */
-static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
-struct ieee80211_vif *vif)
+static struct iwl_wowlan_status *
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
u32 base = mvm->error_event_table;
struct error_table_start {
@@ -1479,19 +1508,15 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
.id = WOWLAN_GET_STATUSES,
.flags = CMD_WANT_SKB,
};
-struct iwl_wowlan_status_data status;
-struct iwl_wowlan_status *fw_status;
-int ret, len, status_size, i;
-bool keep;
-struct ieee80211_sta *ap_sta;
-struct iwl_mvm_sta *mvm_ap_sta;
+struct iwl_wowlan_status *status, *fw_status;
+int ret, len, status_size;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
if (err_info.valid) {
-IWL_INFO(mvm, "error table is valid (%d)\n",
-err_info.valid);
+IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
+err_info.valid, err_info.error_id);
if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
struct cfg80211_wowlan_wakeup wakeup = {
.rfkill_release = true,
@@ -1499,7 +1524,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
ieee80211_report_wowlan_wakeup(vif, &wakeup,
GFP_KERNEL);
}
-goto out_unlock;
+return ERR_PTR(-EIO);
}
/* only for tracing for now */
@@ -1510,22 +1535,53 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "failed to query status (%d)\n", ret);
-goto out_unlock;
+return ERR_PTR(ret);
}
/* RF-kill already asserted again... */
-if (!cmd.resp_pkt)
-goto out_unlock;
+if (!cmd.resp_pkt) {
+ret = -ERFKILL;
+goto out_free_resp;
+}
status_size = sizeof(*fw_status);
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ret = -EIO;
+goto out_free_resp;
+}
+status = (void *)cmd.resp_pkt->data;
+if (len != (status_size +
+ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
+IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ret = -EIO;
goto out_free_resp;
}
-fw_status = (void *)cmd.resp_pkt->data;
+fw_status = kmemdup(status, len, GFP_KERNEL);
out_free_resp:
iwl_free_resp(&cmd);
return ret ? ERR_PTR(ret) : fw_status;
}
/* releases the MVM mutex */
static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_wowlan_status_data status;
struct iwl_wowlan_status *fw_status;
int i;
bool keep;
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
if (IS_ERR_OR_NULL(fw_status))
goto out_unlock;
status.pattern_number = le16_to_cpu(fw_status->pattern_number);
for (i = 0; i < 8; i++)
@@ -1538,17 +1594,12 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
le32_to_cpu(fw_status->wake_packet_bufsize);
status.wake_packet = fw_status->wake_packet;
-if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
-IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-goto out_free_resp;
-}
/* still at hard-coded place 0 for D3 image */
ap_sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[0],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(ap_sta))
-goto out_free_resp;
+goto out_free;
mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
@@ -1565,16 +1616,42 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
-iwl_free_resp(&cmd);
+kfree(fw_status);
return keep;
-out_free_resp:
-iwl_free_resp(&cmd);
+out_free:
+kfree(fw_status);
out_unlock:
mutex_unlock(&mvm->mutex);
return false;
}
static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
struct iwl_wowlan_status *fw_status;
u32 reasons = 0;
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
if (!IS_ERR_OR_NULL(fw_status))
reasons = le32_to_cpu(fw_status->wakeup_reasons);
if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
wakeup.rfkill_release = true;
if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
/* TODO: read and check if it was netdetect */
wakeup_report = NULL;
}
mutex_unlock(&mvm->mutex);
ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
}
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm) static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
{ {
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1632,11 +1709,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
if (mvm->net_detect) {
iwl_mvm_query_netdetect_reasons(mvm, vif);
} else {
keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (keep)
mvm->keep_vif = vif;
#endif
}
/* has unlocked the mutex, so skip that */
goto out;
@@ -1651,6 +1732,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* return 1 to reconfigure the device */
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
return 1;
}
@@ -1658,18 +1740,10 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-if (iwl_mvm_is_d0i3_supported(mvm)) {
-bool exit_now;
-mutex_lock(&mvm->d0i3_suspend_mutex);
-__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
-exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
-&mvm->d0i3_suspend_flags);
-mutex_unlock(&mvm->d0i3_suspend_mutex);
-if (exit_now)
-_iwl_mvm_exit_d0i3(mvm);
+iwl_trans_resume(mvm->trans);
+
+if (iwl_mvm_is_d0i3_supported(mvm))
return 0;
-}
return __iwl_mvm_resume(mvm, false);
}
......
@@ -936,7 +936,11 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
if (scan_rx_ant & ~mvm->fw->valid_rx_ant)
return -EINVAL;
if (mvm->scan_rx_ant != scan_rx_ant) {
mvm->scan_rx_ant = scan_rx_ant;
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
iwl_mvm_config_scan(mvm);
}
return count;
}
@@ -1194,14 +1198,8 @@ static ssize_t iwl_dbgfs_netdetect_write(struct iwl_mvm *mvm, char *buf,
kfree(mvm->nd_config->match_sets);
kfree(mvm->nd_config);
mvm->nd_config = NULL;
-kfree(mvm->nd_ies);
-mvm->nd_ies = NULL;
}
-mvm->nd_ies = kzalloc(sizeof(*mvm->nd_ies), GFP_KERNEL);
-if (!mvm->nd_ies)
-return -ENOMEM;
mvm->nd_config = kzalloc(sizeof(*mvm->nd_config) +
(11 * sizeof(struct ieee80211_channel *)),
GFP_KERNEL);
@@ -1258,8 +1256,6 @@ static ssize_t iwl_dbgfs_netdetect_write(struct iwl_mvm *mvm, char *buf,
kfree(mvm->nd_config->match_sets);
kfree(mvm->nd_config);
mvm->nd_config = NULL;
-kfree(mvm->nd_ies);
-mvm->nd_ies = NULL;
out:
return ret;
}
......
@@ -84,6 +84,8 @@
* @BT_COEX_SYNC2SCO:
* @BT_COEX_CORUNNING:
* @BT_COEX_MPLUT:
* @BT_COEX_TTC:
* @BT_COEX_RRC:
*
* The COEX_MODE must be set for each command. Even if it is not changed.
*/
@@ -100,6 +102,8 @@ enum iwl_bt_coex_flags {
BT_COEX_SYNC2SCO = BIT(7),
BT_COEX_CORUNNING = BIT(8),
BT_COEX_MPLUT = BIT(9),
BT_COEX_TTC = BIT(20),
BT_COEX_RRC = BIT(21),
};
/*
@@ -127,6 +131,8 @@ enum iwl_bt_coex_valid_bit_msk {
BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
BT_VALID_SYNC_TO_SCO = BIT(18),
BT_VALID_TTC = BIT(20),
BT_VALID_RRC = BIT(21),
};
/**
@@ -506,7 +512,8 @@ struct iwl_bt_coex_profile_notif_old {
u8 bt_agg_traffic_load;
u8 bt_ci_compliance;
u8 ttc_enabled;
-__le16 reserved;
+u8 rrc_enabled;
+u8 reserved;
__le32 primary_ch_lut;
__le32 secondary_ch_lut;
......
@@ -370,7 +370,7 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_DEBUG_FLAG_DEFAULT 0
#define IWL_BF_DEBUG_FLAG_D0I3 0
-#define IWL_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWL_BF_ESCAPE_TIMER_DEFAULT 0
#define IWL_BF_ESCAPE_TIMER_D0I3 0
#define IWL_BF_ESCAPE_TIMER_MAX 1024
#define IWL_BF_ESCAPE_TIMER_MIN 0
......
@@ -794,4 +794,257 @@ struct iwl_periodic_scan_complete {
__le32 reserved;
} __packed;
/* UMAC Scan API */
/**
* struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands
* @size: size of the command (not including header)
* @reserved0: for future use and alignment
* @ver: API version number
*/
struct iwl_mvm_umac_cmd_hdr {
__le16 size;
u8 reserved0;
u8 ver;
} __packed;
#define IWL_MVM_MAX_SIMULTANEOUS_SCANS 8
enum scan_config_flags {
SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1),
SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2),
SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3),
SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8),
SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9),
SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10),
SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11),
SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12),
SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13),
SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14),
SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15),
SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16),
SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17),
SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18),
SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19),
SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20),
SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21),
/* Bits 26-31 are for num of channels in channel_array */
#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
};
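/*
 * Editor's example (hypothetical values, not from this patch): the low bits
 * of the flags word pick individual behaviours, while bits 26-31 carry the
 * channel count through SCAN_CONFIG_N_CHANNELS().  Assembling a flags value
 * for a config command covering, say, 39 channels might look like:
 */
static const u32 example_scan_cfg_flags =
	SCAN_CONFIG_FLAG_ACTIVATE |
	SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
	SCAN_CONFIG_N_CHANNELS(39);
/* The value is converted with cpu_to_le32() when written into the command. */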
enum scan_config_rates {
/* OFDM basic rates */
SCAN_CONFIG_RATE_6M = BIT(0),
SCAN_CONFIG_RATE_9M = BIT(1),
SCAN_CONFIG_RATE_12M = BIT(2),
SCAN_CONFIG_RATE_18M = BIT(3),
SCAN_CONFIG_RATE_24M = BIT(4),
SCAN_CONFIG_RATE_36M = BIT(5),
SCAN_CONFIG_RATE_48M = BIT(6),
SCAN_CONFIG_RATE_54M = BIT(7),
/* CCK basic rates */
SCAN_CONFIG_RATE_1M = BIT(8),
SCAN_CONFIG_RATE_2M = BIT(9),
SCAN_CONFIG_RATE_5M = BIT(10),
SCAN_CONFIG_RATE_11M = BIT(11),
/* Bits 16-27 are for supported rates */
#define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16)
};
enum iwl_channel_flags {
IWL_CHANNEL_FLAG_EBS = BIT(0),
IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1),
IWL_CHANNEL_FLAG_EBS_ADD = BIT(2),
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3),
};
/**
* struct iwl_scan_config
* @hdr: umac command header
* @flags: enum scan_config_flags
* @tx_chains: valid_tx antenna - ANT_* definitions
* @rx_chains: valid_rx antenna - ANT_* definitions
* @legacy_rates: default legacy rates - enum scan_config_rates
* @out_of_channel_time: default max out of serving channel time
* @suspend_time: default max suspend time
* @dwell_active: default dwell time for active scan
* @dwell_passive: default dwell time for passive scan
* @dwell_fragmented: default dwell time for fragmented scan
* @reserved: for future use and alignment
* @mac_addr: default mac address to be used in probes
* @bcast_sta_id: the index of the station in the fw
* @channel_flags: default channel flags - enum iwl_channel_flags
* scan_config_channel_flag
* @channel_array: default supported channels
*/
struct iwl_scan_config {
struct iwl_mvm_umac_cmd_hdr hdr;
__le32 flags;
__le32 tx_chains;
__le32 rx_chains;
__le32 legacy_rates;
__le32 out_of_channel_time;
__le32 suspend_time;
u8 dwell_active;
u8 dwell_passive;
u8 dwell_fragmented;
u8 reserved;
u8 mac_addr[ETH_ALEN];
u8 bcast_sta_id;
u8 channel_flags;
u8 channel_array[];
} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
/**
* iwl_umac_scan_flags
*@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
* can be preempted by other scan requests with higher priority.
* The low priority scan is aborted.
*@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
* when scan starts.
*/
enum iwl_umac_scan_flags {
IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0),
IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1),
};
enum iwl_umac_scan_uid_offsets {
IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0,
IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8,
};
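/*
 * Editor's illustration (assumed helpers, not part of the patch): the scan
 * UID is a 32-bit value with a type in bits 0-7 and a sequence number from
 * bit 8 up, per the offsets above.
 */
static inline u32 example_scan_uid(u32 type, u32 seq)
{
	return (type << IWL_UMAC_SCAN_UID_TYPE_OFFSET) |
	       (seq << IWL_UMAC_SCAN_UID_SEQ_OFFSET);
}

static inline u32 example_scan_uid_seq(u32 uid)
{
	return uid >> IWL_UMAC_SCAN_UID_SEQ_OFFSET;
}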
enum iwl_umac_scan_general_flags {
IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0),
IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1),
IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2),
IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3),
IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4),
IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5),
IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6),
IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7),
IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8),
IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9)
};
/**
* struct iwl_scan_channel_cfg_umac
* @flags: bitmap - 0-19: directed scan to i'th ssid.
* @channel_num: channel number 1-13 etc.
* @iter_count: repetition count for the channel.
* @iter_interval: interval between two scan iterations on one channel.
*/
struct iwl_scan_channel_cfg_umac {
__le32 flags;
u8 channel_num;
u8 iter_count;
__le16 iter_interval;
} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */
/**
* struct iwl_scan_umac_schedule
* @interval: interval in seconds between scan iterations
* @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop
* @reserved: for alignment and future use
*/
struct iwl_scan_umac_schedule {
__le16 interval;
u8 iter_count;
u8 reserved;
} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */
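/*
 * Editor's sketch of a two-plan schedule as carried in
 * iwl_scan_req_umac_tail below: e.g. ten iterations every 30 seconds, then
 * repeat every 120 seconds forever (0xff = infinite).  The numbers are made
 * up for illustration.
 */
static const struct iwl_scan_umac_schedule example_sched_plans[2] = {
	{ .interval = cpu_to_le16(30),  .iter_count = 10   },
	{ .interval = cpu_to_le16(120), .iter_count = 0xff },
};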
/**
* struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command
* parameters following channels configuration array.
* @schedule: two scheduling plans.
* @delay: delay in TUs before starting the first scan iteration
* @reserved: for future use and alignment
* @preq: probe request with IEs blocks
* @direct_scan: list of SSIDs for directed active scan
*/
struct iwl_scan_req_umac_tail {
/* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
struct iwl_scan_umac_schedule schedule[2];
__le16 delay;
__le16 reserved;
/* SCAN_PROBE_PARAMS_API_S_VER_1 */
struct iwl_scan_probe_req preq;
struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
} __packed;
/**
* struct iwl_scan_req_umac
* @hdr: umac command header
* @flags: &enum iwl_umac_scan_flags
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @general_flags: &enum iwl_umac_scan_general_flags
* @reserved1: for future use and alignment
* @active_dwell: dwell time for active scan
* @passive_dwell: dwell time for passive scan
* @fragmented_dwell: dwell time for fragmented passive scan
* @max_out_time: max out of serving channel time
* @suspend_time: max suspend time
* @scan_priority: scan internal prioritization &enum iwl_scan_priority
* @channel_flags: &enum iwl_scan_channel_flags
* @n_channels: num of channels in scan request
* @reserved2: for future use and alignment
* @data: &struct iwl_scan_channel_cfg_umac and
* &struct iwl_scan_req_umac_tail
*/
struct iwl_scan_req_umac {
struct iwl_mvm_umac_cmd_hdr hdr;
__le32 flags;
__le32 uid;
__le32 ooc_priority;
/* SCAN_GENERAL_PARAMS_API_S_VER_1 */
__le32 general_flags;
u8 reserved1;
u8 active_dwell;
u8 passive_dwell;
u8 fragmented_dwell;
__le32 max_out_time;
__le32 suspend_time;
__le32 scan_priority;
/* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
__le16 reserved2;
u8 data[];
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
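/*
 * Editor's note: the trailing data[] holds n_channels channel configs
 * followed by the tail struct, so the allocation for the host command has to
 * be sized accordingly.  A plausible helper (illustrative, not taken from
 * this patch set):
 */
static inline size_t example_scan_req_umac_size(unsigned int n_channels)
{
	return sizeof(struct iwl_scan_req_umac) +
	       n_channels * sizeof(struct iwl_scan_channel_cfg_umac) +
	       sizeof(struct iwl_scan_req_umac_tail);
}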
/**
* struct iwl_umac_scan_abort
* @hdr: umac command header
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @flags: reserved
*/
struct iwl_umac_scan_abort {
struct iwl_mvm_umac_cmd_hdr hdr;
__le32 uid;
__le32 flags;
} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
/**
* struct iwl_umac_scan_complete
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @last_schedule: last scheduling line
* @last_iter: last scan iteration number
* @status: &enum iwl_scan_offload_complete_status
* @ebs_status: &enum iwl_scan_ebs_status
* @time_from_last_iter: time elapsed from last iteration
* @reserved: for future use
*/
struct iwl_umac_scan_complete {
__le32 uid;
u8 last_schedule;
u8 last_iter;
u8 status;
u8 ebs_status;
__le32 time_from_last_iter;
__le32 reserved;
} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
#endif #endif
@@ -106,6 +106,12 @@ enum {
DBG_CFG = 0x9,
ANTENNA_COUPLING_NOTIFICATION = 0xa,
/* UMAC scan commands */
SCAN_CFG_CMD = 0xc,
SCAN_REQ_UMAC = 0xd,
SCAN_ABORT_UMAC = 0xe,
SCAN_COMPLETE_UMAC = 0xf,
/* station table */
ADD_STA_KEY = 0x17,
ADD_STA = 0x18,
@@ -122,6 +128,11 @@
/* global key */
WEP_KEY = 0x20,
/* TDLS */
TDLS_CHANNEL_SWITCH_CMD = 0x27,
TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
TDLS_CONFIG_CMD = 0xa7,
/* MAC and Binding commands */
MAC_CONTEXT_CMD = 0x28,
TIME_EVENT_CMD = 0x29, /* both CMD and response */
@@ -190,6 +201,8 @@ enum {
/* Power - new power table command */
MAC_PM_POWER_TABLE = 0xa9,
MFUART_LOAD_NOTIFICATION = 0xb1,
REPLY_RX_PHY_CMD = 0xc0,
REPLY_RX_MPDU_CMD = 0xc1,
BA_NOTIF = 0xc5,
@@ -1200,6 +1213,21 @@ struct iwl_missed_beacons_notif {
__le32 num_recvd_beacons;
} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
/**
* struct iwl_mfuart_load_notif - mfuart image version & status
* ( MFUART_LOAD_NOTIFICATION = 0xb1 )
* @installed_ver: installed image version
* @external_ver: external image version
* @status: MFUART loading status
* @duration: MFUART loading time
*/
struct iwl_mfuart_load_notif {
__le32 installed_ver;
__le32 external_ver;
__le32 status;
__le32 duration;
} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/
/**
* struct iwl_set_calib_default_cmd - set default value for calibration.
* ( SET_CALIB_DEFAULT_CMD = 0x8e )
@@ -1711,4 +1739,145 @@ struct iwl_scd_txq_cfg_cmd {
u8 flags;
} __packed;
/***********************************
* TDLS API
***********************************/
/* Type of TDLS request */
enum iwl_tdls_channel_switch_type {
TDLS_SEND_CHAN_SW_REQ = 0,
TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH,
TDLS_MOVE_CH,
}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */
/**
* Switch timing sub-element in a TDLS channel-switch command
* @frame_timestamp: GP2 timestamp of channel-switch request/response packet
* received from peer
* @max_offchan_duration: What amount of microseconds out of a DTIM is given
* to the TDLS off-channel communication. For instance if the DTIM is
* 200TU and the TDLS peer is to be given 25% of the time, the value
* given will be 50TU, or 50 * 1024 if translated into microseconds.
* @switch_time: switch time the peer sent in its channel switch timing IE
* @switch_timeout: switch timeout the peer sent in its channel switch timing IE
*/
struct iwl_tdls_channel_switch_timing {
__le32 frame_timestamp; /* GP2 time of peer packet Rx */
__le32 max_offchan_duration; /* given in micro-seconds */
__le32 switch_time; /* given in micro-seconds */
__le32 switch_timeout; /* given in micro-seconds */
} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
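/*
 * Editor's arithmetic check of the max_offchan_duration example in the
 * comment above: 25% of a 200 TU DTIM is 50 TU, and with 1 TU = 1024
 * microseconds that is 51200 us.  Generalised (purely illustrative):
 */
static inline u32 example_tdls_offchan_usec(u32 dtim_tu, u32 percent)
{
	return (dtim_tu * percent / 100) * 1024;	/* 200, 25 -> 51200 */
}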
#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
/**
* TDLS channel switch frame template
*
* A template representing a TDLS channel-switch request or response frame
*
* @switch_time_offset: offset to the channel switch timing IE in the template
* @tx_cmd: Tx parameters for the frame
* @data: frame data
*/
struct iwl_tdls_channel_switch_frame {
__le32 switch_time_offset;
struct iwl_tx_cmd tx_cmd;
u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
/**
* TDLS channel switch command
*
* The command is sent to initiate a channel switch and also in response to
* incoming TDLS channel-switch request/response packets from remote peers.
*
* @switch_type: see &enum iwl_tdls_channel_switch_type
* @peer_sta_id: station id of TDLS peer
* @ci: channel we switch to
* @timing: timing related data for command
* @frame: channel-switch request/response template, depending on switch_type
*/
struct iwl_tdls_channel_switch_cmd {
u8 switch_type;
__le32 peer_sta_id;
struct iwl_fw_channel_info ci;
struct iwl_tdls_channel_switch_timing timing;
struct iwl_tdls_channel_switch_frame frame;
} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
/**
* TDLS channel switch start notification
*
* @status: non-zero on success
* @offchannel_duration: duration given in microseconds
* @sta_id: peer currently performing the channel-switch with
*/
struct iwl_tdls_channel_switch_notif {
__le32 status;
__le32 offchannel_duration;
__le32 sta_id;
} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
/**
* TDLS station info
*
* @sta_id: station id of the TDLS peer
* @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx
* @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer
* @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise
*/
struct iwl_tdls_sta_info {
u8 sta_id;
u8 tx_to_peer_tid;
__le16 tx_to_peer_ssn;
__le32 is_initiator;
} __packed; /* TDLS_STA_INFO_VER_1 */
/**
* TDLS basic config command
*
* @id_and_color: MAC id and color being configured
* @tdls_peer_count: amount of currently connected TDLS peers
* @tx_to_ap_tid: TID reserved vs. the AP for FW based Tx
* @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP
* @sta_info: per-station info. Only the first tdls_peer_count entries are set
* @pti_req_data_offset: offset of network-level data for the PTI template
* @pti_req_tx_cmd: Tx parameters for PTI request template
* @pti_req_template: PTI request template data
*/
struct iwl_tdls_config_cmd {
__le32 id_and_color; /* mac id and color */
u8 tdls_peer_count;
u8 tx_to_ap_tid;
__le16 tx_to_ap_ssn;
struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT];
__le32 pti_req_data_offset;
struct iwl_tx_cmd pti_req_tx_cmd;
u8 pti_req_template[0];
} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
/**
* TDLS per-station config information from FW
*
* @sta_id: station id of the TDLS peer
* @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to
* the peer
*/
struct iwl_tdls_config_sta_info_res {
__le16 sta_id;
__le16 tx_to_peer_last_seq;
} __packed; /* TDLS_STA_INFO_RSP_VER_1 */
/**
* TDLS config information from FW
*
* @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP
* @sta_info: per-station TDLS config information
*/
struct iwl_tdls_config_res {
__le32 tx_to_ap_last_seq;
struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
#endif /* __fw_api_h__ */
@@ -227,6 +227,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
st_fwrd_space.addr = mvm->sf_space.addr;
st_fwrd_space.size = mvm->sf_space.size;
ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
if (ret) {
IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
return ret;
}
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
@@ -462,6 +466,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
/* reset quota debouncing buffer - 0xff will yield invalid data */
memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
@@ -501,6 +507,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
ret = iwl_mvm_config_scan(mvm);
if (ret)
goto error;
}
/* allow FW/transport low power modes if not during restart */
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -587,3 +599,19 @@ int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
le32_to_cpu(radio_version->radio_dash));
return 0;
}
int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
IWL_DEBUG_INFO(mvm,
"MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
le32_to_cpu(mfuart_notif->installed_ver),
le32_to_cpu(mfuart_notif->external_ver),
le32_to_cpu(mfuart_notif->status),
le32_to_cpu(mfuart_notif->duration));
return 0;
}
...@@ -83,11 +83,15 @@ struct iwl_mvm_mac_iface_iterator_data { ...@@ -83,11 +83,15 @@ struct iwl_mvm_mac_iface_iterator_data {
struct ieee80211_vif *vif; struct ieee80211_vif *vif;
unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)]; unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)]; unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
u32 used_hw_queues;
enum iwl_tsf_id preferred_tsf; enum iwl_tsf_id preferred_tsf;
bool found_vif; bool found_vif;
}; };
struct iwl_mvm_hw_queues_iface_iterator_data {
struct ieee80211_vif *exclude_vif;
unsigned long used_hw_queues;
};
static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif) struct ieee80211_vif *vif)
{ {
...@@ -213,6 +217,54 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif) ...@@ -213,6 +217,54 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
return qmask; return qmask;
} }
static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
/* exclude the given vif */
if (vif == data->exclude_vif)
return;
data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
}
static void iwl_mvm_mac_sta_hw_queues_iter(void *_data,
struct ieee80211_sta *sta)
{
struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* Mark the queues used by the sta */
data->used_hw_queues |= mvmsta->tfd_queue_msk;
}
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *exclude_vif)
{
struct iwl_mvm_hw_queues_iface_iterator_data data = {
.exclude_vif = exclude_vif,
.used_hw_queues =
BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
BIT(mvm->aux_queue) |
BIT(IWL_MVM_CMD_QUEUE),
};
lockdep_assert_held(&mvm->mutex);
/* mark all VIF used hw queues */
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
iwl_mvm_iface_hw_queues_iter, &data);
/* don't assign the same hw queues as TDLS stations */
ieee80211_iterate_stations_atomic(mvm->hw,
iwl_mvm_mac_sta_hw_queues_iter,
&data);
return data.used_hw_queues;
}
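A minimal sketch of how a caller can carve a free hardware queue out of the returned mask, mirroring the per-AC allocation loop further down in this file; the mvm->first_agg_queue upper bound is an assumption taken from that loop.

unsigned long used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
u8 queue = find_first_zero_bit(&used_hw_queues, mvm->first_agg_queue);

if (queue >= mvm->first_agg_queue) {
	IWL_ERR(mvm, "Failed to allocate a free hardware queue\n");
	return -EIO;
}

/* claim the queue so the next lookup skips it */
__set_bit(queue, &used_hw_queues);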
static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif) struct ieee80211_vif *vif)
{ {
...@@ -225,9 +277,6 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, ...@@ -225,9 +277,6 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
return; return;
} }
/* Mark the queues used by the vif */
data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
/* Mark MAC IDs as used by clearing the available bit, and /* Mark MAC IDs as used by clearing the available bit, and
* (below) mark TSFs as used if their existing use is not * (below) mark TSFs as used if their existing use is not
* compatible with the new interface type. * compatible with the new interface type.
...@@ -274,10 +323,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, ...@@ -274,10 +323,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
.available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
/* no preference yet */ /* no preference yet */
.preferred_tsf = NUM_TSF_IDS, .preferred_tsf = NUM_TSF_IDS,
.used_hw_queues =
BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
BIT(mvm->aux_queue) |
BIT(IWL_MVM_CMD_QUEUE),
.found_vif = false, .found_vif = false,
}; };
u32 ac; u32 ac;
...@@ -316,6 +361,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, ...@@ -316,6 +361,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
iwl_mvm_mac_iface_iterator, &data); iwl_mvm_mac_iface_iterator, &data);
used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
/* /*
* In the case we're getting here during resume, it's similar to * In the case we're getting here during resume, it's similar to
* firmware restart, and with RESUME_ALL the iterator will find * firmware restart, and with RESUME_ALL the iterator will find
...@@ -365,8 +412,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, ...@@ -365,8 +412,6 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
return 0; return 0;
} }
used_hw_queues = data.used_hw_queues;
/* Find available queues, and allocate them to the ACs */ /* Find available queues, and allocate them to the ACs */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
u8 queue = find_first_zero_bit(&used_hw_queues, u8 queue = find_first_zero_bit(&used_hw_queues,
...@@ -1218,17 +1263,25 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) ...@@ -1218,17 +1263,25 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
} }
static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
struct ieee80211_vif *csa_vif, u32 gp2) struct ieee80211_vif *csa_vif, u32 gp2,
bool tx_success)
{ {
struct iwl_mvm_vif *mvmvif = struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(csa_vif); iwl_mvm_vif_from_mac80211(csa_vif);
/* Don't start the countdown from a failed beacon */
if (!tx_success && !mvmvif->csa_countdown)
return;
mvmvif->csa_countdown = true;
if (!ieee80211_csa_is_complete(csa_vif)) { if (!ieee80211_csa_is_complete(csa_vif)) {
int c = ieee80211_csa_update_counter(csa_vif); int c = ieee80211_csa_update_counter(csa_vif);
iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif); iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
if (csa_vif->p2p && if (csa_vif->p2p &&
!iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2) { !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 &&
tx_success) {
u32 rel_time = (c + 1) * u32 rel_time = (c + 1) *
csa_vif->bss_conf.beacon_int - csa_vif->bss_conf.beacon_int -
IWL_MVM_CHANNEL_SWITCH_TIME_GO; IWL_MVM_CHANNEL_SWITCH_TIME_GO;
...@@ -1255,6 +1308,7 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, ...@@ -1255,6 +1308,7 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct ieee80211_vif *csa_vif; struct ieee80211_vif *csa_vif;
struct ieee80211_vif *tx_blocked_vif; struct ieee80211_vif *tx_blocked_vif;
u64 tsf; u64 tsf;
u16 status;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
...@@ -1271,18 +1325,18 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, ...@@ -1271,18 +1325,18 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
tsf = le64_to_cpu(beacon->tsf); tsf = le64_to_cpu(beacon->tsf);
} }
status = le16_to_cpu(beacon_notify_hdr->status.status) & TX_STATUS_MSK;
IWL_DEBUG_RX(mvm, IWL_DEBUG_RX(mvm,
"beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n", "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
le16_to_cpu(beacon_notify_hdr->status.status) & status, beacon_notify_hdr->failure_frame, tsf,
TX_STATUS_MSK,
beacon_notify_hdr->failure_frame, tsf,
mvm->ap_last_beacon_gp2, mvm->ap_last_beacon_gp2,
le32_to_cpu(beacon_notify_hdr->initial_rate)); le32_to_cpu(beacon_notify_hdr->initial_rate));
csa_vif = rcu_dereference_protected(mvm->csa_vif, csa_vif = rcu_dereference_protected(mvm->csa_vif,
lockdep_is_held(&mvm->mutex)); lockdep_is_held(&mvm->mutex));
if (unlikely(csa_vif && csa_vif->csa_active)) if (unlikely(csa_vif && csa_vif->csa_active))
iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2); iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2,
(status == TX_STATUS_SUCCESS));
tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif, tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif,
lockdep_is_held(&mvm->mutex)); lockdep_is_held(&mvm->mutex));
......
...@@ -254,6 +254,26 @@ static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm, ...@@ -254,6 +254,26 @@ static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
spin_unlock_bh(&mvm->refs_lock); spin_unlock_bh(&mvm->refs_lock);
} }
bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
{
int i;
bool taken = false;
if (!iwl_mvm_is_d0i3_supported(mvm))
return true;
spin_lock_bh(&mvm->refs_lock);
for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
if (mvm->refs[i]) {
taken = true;
break;
}
}
spin_unlock_bh(&mvm->refs_lock);
return taken;
}
int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{ {
iwl_mvm_ref(mvm, ref_type); iwl_mvm_ref(mvm, ref_type);
...@@ -303,7 +323,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ...@@ -303,7 +323,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
IEEE80211_RADIOTAP_MCS_HAVE_STBC; IEEE80211_RADIOTAP_MCS_HAVE_STBC;
hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC; hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
hw->rate_control_algorithm = "iwl-mvm-rs"; hw->rate_control_algorithm = "iwl-mvm-rs";
/* /*
...@@ -323,8 +344,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ...@@ -323,8 +344,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
} }
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS; hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
}
hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif); hw->vif_data_size = sizeof(struct iwl_mvm_vif);
...@@ -403,7 +429,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ...@@ -403,7 +429,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_P2P_GO_OPPPS | NL80211_FEATURE_P2P_GO_OPPPS |
NL80211_FEATURE_DYNAMIC_SMPS | NL80211_FEATURE_DYNAMIC_SMPS |
NL80211_FEATURE_STATIC_SMPS; NL80211_FEATURE_STATIC_SMPS |
NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
if (mvm->fw->ucode_capa.capa[0] & if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)
...@@ -441,7 +468,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ...@@ -441,7 +468,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT | WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_EAP_IDENTITY_REQ | WIPHY_WOWLAN_EAP_IDENTITY_REQ |
WIPHY_WOWLAN_RFKILL_RELEASE; WIPHY_WOWLAN_RFKILL_RELEASE |
WIPHY_WOWLAN_NET_DETECT;
if (!iwlwifi_mod_params.sw_crypto) if (!iwlwifi_mod_params.sw_crypto)
mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
WIPHY_WOWLAN_GTK_REKEY_FAILURE | WIPHY_WOWLAN_GTK_REKEY_FAILURE |
...@@ -450,6 +478,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ...@@ -450,6 +478,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
hw->wiphy->wowlan = &mvm->wowlan; hw->wiphy->wowlan = &mvm->wowlan;
} }
...@@ -464,6 +493,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ...@@ -464,6 +493,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (ret) if (ret)
return ret; return ret;
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) {
IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
}
if (mvm->fw->ucode_capa.capa[0] &
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) {
IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
}
ret = ieee80211_register_hw(mvm->hw); ret = ieee80211_register_hw(mvm->hw);
if (ret) if (ret)
iwl_mvm_leds_exit(mvm); iwl_mvm_leds_exit(mvm);
...@@ -819,6 +859,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) ...@@ -819,6 +859,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{ {
/* clear the D3 reconfig flag; we only need it to avoid dumping a
* firmware coredump on reconfiguration, and we shouldn't do that
* on the D3->D0 transition
*/
if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status))
iwl_mvm_fw_error_dump(mvm); iwl_mvm_fw_error_dump(mvm);
iwl_trans_stop_device(mvm->trans); iwl_trans_stop_device(mvm->trans);
...@@ -840,6 +885,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) ...@@ -840,6 +885,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm); iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
...@@ -912,9 +958,34 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) ...@@ -912,9 +958,34 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
/* allow transport/FW low power modes */ /* allow transport/FW low power modes */
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
/*
* If we have TDLS peers, remove them. We don't know the last seqno/PN
* of packets the FW sent out, so we must reconnect.
*/
iwl_mvm_teardown_tdls_peers(mvm);
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
} }
static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
{
bool exit_now;
if (!iwl_mvm_is_d0i3_supported(mvm))
return;
mutex_lock(&mvm->d0i3_suspend_mutex);
__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
&mvm->d0i3_suspend_flags);
mutex_unlock(&mvm->d0i3_suspend_mutex);
if (exit_now) {
IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
_iwl_mvm_exit_d0i3(mvm);
}
}
static void static void
iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type) enum ieee80211_reconfig_type reconfig_type)
...@@ -926,6 +997,7 @@ iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, ...@@ -926,6 +997,7 @@ iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
iwl_mvm_restart_complete(mvm); iwl_mvm_restart_complete(mvm);
break; break;
case IEEE80211_RECONFIG_TYPE_SUSPEND: case IEEE80211_RECONFIG_TYPE_SUSPEND:
iwl_mvm_resume_complete(mvm);
break; break;
} }
} }
...@@ -1889,9 +1961,11 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, ...@@ -1889,9 +1961,11 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
req->n_channels > mvm->fw->ucode_capa.n_scan_channels) req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
return -EINVAL; return -EINVAL;
if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED); ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED);
if (ret) if (ret)
return ret; return ret;
}
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
...@@ -1902,7 +1976,9 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, ...@@ -1902,7 +1976,9 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN); iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
else if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req); ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
else else
ret = iwl_mvm_scan_request(mvm, vif, req); ret = iwl_mvm_scan_request(mvm, vif, req);
...@@ -2119,6 +2195,15 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, ...@@ -2119,6 +2195,15 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
out_unlock: out_unlock:
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
if (sta->tdls && ret == 0) {
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)
ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
}
return ret; return ret;
} }
...@@ -2201,9 +2286,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, ...@@ -2201,9 +2286,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret; int ret;
if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS); ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
if (ret) if (ret)
return ret; return ret;
}
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
...@@ -2223,11 +2310,10 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, ...@@ -2223,11 +2310,10 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
goto out; goto out;
} }
mvm->scan_status = IWL_MVM_SCAN_SCHED;
ret = iwl_mvm_scan_offload_start(mvm, vif, req, ies); ret = iwl_mvm_scan_offload_start(mvm, vif, req, ies);
if (ret) if (ret)
mvm->scan_status = IWL_MVM_SCAN_NONE; mvm->scan_status = IWL_MVM_SCAN_NONE;
out: out:
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
return ret; return ret;
...@@ -2245,6 +2331,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, ...@@ -2245,6 +2331,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
iwl_mvm_wait_for_async_handlers(mvm); iwl_mvm_wait_for_async_handlers(mvm);
return ret; return ret;
} }
static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
...@@ -2273,12 +2360,16 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, ...@@ -2273,12 +2360,16 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break; break;
case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_WEP104:
/* /* For non-client mode, only use WEP keys for TX as we probably
* Support for TX only, at least for now, so accept * don't have a station yet anyway and would then have to keep
* the key and do nothing else. Then mac80211 will * track of the keys, linking them to each of the clients/peers
* pass it for TX but we don't have to use it for RX. * as they appear. For now, don't do that, for performance WEP
* offload doesn't really matter much, but we need it for some
* other offload features in client mode.
*/ */
if (vif->type != NL80211_IFTYPE_STATION)
return 0; return 0;
break;
default: default:
/* currently FW supports only one optional cipher scheme */ /* currently FW supports only one optional cipher scheme */
if (hw->n_cipher_schemes && if (hw->n_cipher_schemes &&
...@@ -2595,7 +2686,7 @@ static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) ...@@ -2595,7 +2686,7 @@ static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(mvm, "enter\n"); IWL_DEBUG_MAC80211(mvm, "enter\n");
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
iwl_mvm_stop_p2p_roc(mvm); iwl_mvm_stop_roc(mvm);
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
IWL_DEBUG_MAC80211(mvm, "leave\n"); IWL_DEBUG_MAC80211(mvm, "leave\n");
...@@ -2708,8 +2799,8 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, ...@@ -2708,8 +2799,8 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
switch (vif->type) { switch (vif->type) {
case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP:
/* Unless it's a CSA flow we have nothing to do here */ /* only needed if we're switching chanctx (i.e. during CSA) */
if (vif->csa_active) { if (switching_chanctx) {
mvmvif->ap_ibss_active = true; mvmvif->ap_ibss_active = true;
break; break;
} }
...@@ -2753,23 +2844,32 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, ...@@ -2753,23 +2844,32 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
} }
/* Handle binding during CSA */ /* Handle binding during CSA */
if ((vif->type == NL80211_IFTYPE_AP) || if (vif->type == NL80211_IFTYPE_AP) {
(switching_chanctx && (vif->type == NL80211_IFTYPE_STATION))) {
iwl_mvm_update_quotas(mvm, NULL); iwl_mvm_update_quotas(mvm, NULL);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
} }
if (vif->csa_active && vif->type == NL80211_IFTYPE_STATION) { if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
struct iwl_mvm_sta *mvmsta; u32 duration = 2 * vif->bss_conf.beacon_int;
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, /* iwl_mvm_protect_session() reads directly from the
mvmvif->ap_sta_id); * device (the system time), so make sure it is
* available.
*/
ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
if (ret)
goto out_remove_binding;
if (WARN_ON(!mvmsta)) /* Protect the session to make sure we hear the first
goto out; * beacon on the new channel.
*/
iwl_mvm_protect_session(mvm, vif, duration, duration,
vif->bss_conf.beacon_int / 2,
true);
/* TODO: only re-enable after the first beacon */ iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
iwl_mvm_update_quotas(mvm, NULL);
} }
goto out; goto out;
...@@ -2803,7 +2903,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, ...@@ -2803,7 +2903,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
{ {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_vif *disabled_vif = NULL; struct ieee80211_vif *disabled_vif = NULL;
struct iwl_mvm_sta *mvmsta;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
...@@ -2818,9 +2917,11 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, ...@@ -2818,9 +2917,11 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
break; break;
case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP:
/* This part is triggered only during CSA */ /* This part is triggered only during CSA */
if (!vif->csa_active || !mvmvif->ap_ibss_active) if (!switching_chanctx || !mvmvif->ap_ibss_active)
goto out; goto out;
mvmvif->csa_countdown = false;
/* Set CS bit on all the stations */ /* Set CS bit on all the stations */
iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
...@@ -2835,12 +2936,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, ...@@ -2835,12 +2936,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
disabled_vif = vif; disabled_vif = vif;
mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
mvmvif->ap_sta_id);
if (!WARN_ON(!mvmsta))
iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
break; break;
default: default:
...@@ -2866,18 +2961,12 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, ...@@ -2866,18 +2961,12 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
} }
static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, static int
struct ieee80211_vif_chanctx_switch *vifs, iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
int n_vifs, struct ieee80211_vif_chanctx_switch *vifs)
enum ieee80211_chanctx_switch_mode mode)
{ {
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret; int ret;
/* we only support SWAP_CONTEXTS and with a single-vif right now */
if (mode != CHANCTX_SWMODE_SWAP_CONTEXTS || n_vifs > 1)
return -EOPNOTSUPP;
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
...@@ -2906,15 +2995,51 @@ static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, ...@@ -2906,15 +2995,51 @@ static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
out_reassign: out_reassign:
ret = __iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx); if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
if (ret) {
IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
goto out_restart; goto out_restart;
} }
ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
true)) {
IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
goto out_restart;
}
goto out;
out_restart:
/* things keep failing, better restart the hw */
iwl_mvm_nic_restart(mvm, false);
out:
mutex_unlock(&mvm->mutex);
return ret;
}
static int
iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
struct ieee80211_vif_chanctx_switch *vifs)
{
int ret;
mutex_lock(&mvm->mutex);
__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
true); true);
if (ret) { if (ret) {
IWL_ERR(mvm,
"failed to assign new_ctx during channel switch\n");
goto out_reassign;
}
goto out;
out_reassign:
if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
true)) {
IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
goto out_restart; goto out_restart;
} }
...@@ -2927,6 +3052,34 @@ static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, ...@@ -2927,6 +3052,34 @@ static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
out: out:
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
return ret;
}
static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
/* we only support a single-vif right now */
if (n_vifs > 1)
return -EOPNOTSUPP;
switch (mode) {
case CHANCTX_SWMODE_SWAP_CONTEXTS:
ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
break;
case CHANCTX_SWMODE_REASSIGN_VIF:
ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
break;
default:
ret = -EOPNOTSUPP;
break;
}
return ret; return ret;
} }
...@@ -3012,27 +3165,134 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, ...@@ -3012,27 +3165,134 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
} }
#endif #endif
static void iwl_mvm_channel_switch_beacon(struct ieee80211_hw *hw, static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef) struct ieee80211_channel_switch *chsw)
{
/* By implementing this operation, we prevent mac80211 from
* starting its own channel switch timer, so that we can call
* ieee80211_chswitch_done() ourselves at the right time
* (which is when the absence time event starts).
*/
IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
"dummy channel switch op\n");
}
static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw)
{ {
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct ieee80211_vif *csa_vif; struct ieee80211_vif *csa_vif;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u32 apply_time;
int ret;
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
csa_vif = rcu_dereference_protected(mvm->csa_vif, IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
chsw->chandef.center_freq1);
switch (vif->type) {
case NL80211_IFTYPE_AP:
csa_vif =
rcu_dereference_protected(mvm->csa_vif,
lockdep_is_held(&mvm->mutex)); lockdep_is_held(&mvm->mutex));
if (WARN(csa_vif && csa_vif->csa_active, if (WARN_ONCE(csa_vif && csa_vif->csa_active,
"Another CSA is already in progress")) "Another CSA is already in progress")) {
ret = -EBUSY;
goto out_unlock; goto out_unlock;
}
IWL_DEBUG_MAC80211(mvm, "CSA started to freq %d\n",
chandef->center_freq1);
rcu_assign_pointer(mvm->csa_vif, vif); rcu_assign_pointer(mvm->csa_vif, vif);
if (WARN_ONCE(mvmvif->csa_countdown,
"Previous CSA countdown didn't complete")) {
ret = -EBUSY;
goto out_unlock;
}
break;
case NL80211_IFTYPE_STATION:
/* Schedule the time event to start a bit before beacon 1,
* to make sure we're in the new channel when the
* GO/AP arrives.
*/
apply_time = chsw->device_timestamp +
((vif->bss_conf.beacon_int * (chsw->count - 1) -
IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
if (chsw->block_tx)
iwl_mvm_csa_client_absent(mvm, vif);
iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
apply_time);
if (mvmvif->bf_data.bf_enabled) {
ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
if (ret)
goto out_unlock;
}
break;
default:
break;
}
mvmvif->ps_disabled = true;
ret = iwl_mvm_power_update_ps(mvm);
if (ret)
goto out_unlock;
/* we won't be on this channel any longer */
iwl_mvm_teardown_tdls_peers(mvm);
out_unlock: out_unlock:
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
return ret;
}
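To make the client-side scheduling above concrete, a small self-contained example of the apply_time computation; the numbers are made up, 1 TU is 1024 usec, and 10 is the IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT value defined in mvm.h further down.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t device_timestamp = 1000000;	/* GP2 time of the CSA, in usec */
	uint32_t beacon_int = 100;		/* TUs between TBTTs */
	uint32_t count = 5;			/* beacons left until the switch */
	uint32_t switch_time_client = 10;	/* IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT */

	/* schedule a bit before "beacon 1" TBTT, converting TUs to usec */
	uint32_t apply_time = device_timestamp +
		(beacon_int * (count - 1) - switch_time_client) * 1024;

	printf("time event starts at GP2 time %u usec\n", apply_time);
	return 0;
}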
static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
mutex_lock(&mvm->mutex);
if (vif->type == NL80211_IFTYPE_STATION) {
struct iwl_mvm_sta *mvmsta;
mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
mvmvif->ap_sta_id);
if (WARN_ON(!mvmsta)) {
ret = -EIO;
goto out_unlock;
}
iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
if (ret)
goto out_unlock;
iwl_mvm_stop_session_protection(mvm, vif);
}
mvmvif->ps_disabled = false;
ret = iwl_mvm_power_update_ps(mvm);
out_unlock:
mutex_unlock(&mvm->mutex);
return ret;
} }
static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
...@@ -3041,31 +3301,44 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, ...@@ -3041,31 +3301,44 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif; struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_sta *mvmsta; struct iwl_mvm_sta *mvmsta;
struct ieee80211_sta *sta;
int i;
u32 msk = 0;
if (!vif || vif->type != NL80211_IFTYPE_STATION) if (!vif || vif->type != NL80211_IFTYPE_STATION)
return; return;
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
if (WARN_ON_ONCE(!mvmsta)) { /* flush the AP-station and all TDLS peers */
mutex_unlock(&mvm->mutex); for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
return; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
continue;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (mvmsta->vif != vif)
continue;
/* make sure only TDLS peers or the AP are flushed */
WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
msk |= mvmsta->tfd_queue_msk;
} }
if (drop) { if (drop) {
if (iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, true)) if (iwl_mvm_flush_tx_path(mvm, msk, true))
IWL_ERR(mvm, "flush request fail\n"); IWL_ERR(mvm, "flush request fail\n");
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
} else { } else {
u32 tfd_queue_msk = mvmsta->tfd_queue_msk;
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
/* this can take a while, and we may need/want other operations /* this can take a while, and we may need/want other operations
* to succeed while doing this, so do it without the mutex held * to succeed while doing this, so do it without the mutex held
*/ */
iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_queue_msk); iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
} }
} }
...@@ -3114,7 +3387,13 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { ...@@ -3114,7 +3387,13 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.set_tim = iwl_mvm_set_tim, .set_tim = iwl_mvm_set_tim,
.channel_switch_beacon = iwl_mvm_channel_switch_beacon, .channel_switch = iwl_mvm_channel_switch,
.pre_channel_switch = iwl_mvm_pre_channel_switch,
.post_channel_switch = iwl_mvm_post_channel_switch,
.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
.tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
......
...@@ -87,12 +87,18 @@ ...@@ -87,12 +87,18 @@
/* A TimeUnit is 1024 microsecond */ /* A TimeUnit is 1024 microsecond */
#define MSEC_TO_TU(_msec) (_msec*1000/1024) #define MSEC_TO_TU(_msec) (_msec*1000/1024)
/* This value represents the number of TUs before CSA "beacon 0" TBTT /* For GO, this value represents the number of TUs before CSA "beacon
* when the CSA time-event needs to be scheduled to start. It must be * 0" TBTT when the CSA time-event needs to be scheduled to start. It
* big enough to ensure that we switch in time. * must be big enough to ensure that we switch in time.
*/ */
#define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40 #define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40
/* For the client, this value instead represents the number of TUs before
* CSA "beacon 1" TBTT. We don't know exactly when the GO/AP will be on
* the new channel, so we switch early enough to be sure we are there.
*/
#define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10
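For the GO side, the countdown handler earlier in this series computes the absence start as (c + 1) * beacon_int - IWL_MVM_CHANNEL_SWITCH_TIME_GO TUs after the beacon that carried counter value c; for example, with beacon_int = 100 TUs and c = 2, the GO schedules its absence (2 + 1) * 100 - 40 = 260 TUs, roughly 266 ms, after that beacon's GP2 timestamp.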
/* /*
* This value (in TUs) is used to fine tune the CSA NoA end time which should * This value (in TUs) is used to fine tune the CSA NoA end time which should
* be just before "beacon 0" TBTT. * be just before "beacon 0" TBTT.
...@@ -269,6 +275,7 @@ enum iwl_mvm_ref_type { ...@@ -269,6 +275,7 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_NMI, IWL_MVM_REF_NMI,
IWL_MVM_REF_TM_CMD, IWL_MVM_REF_TM_CMD,
IWL_MVM_REF_EXIT_WORK, IWL_MVM_REF_EXIT_WORK,
IWL_MVM_REF_PROTECT_CSA,
/* update debugfs.c when changing this */ /* update debugfs.c when changing this */
...@@ -399,6 +406,9 @@ struct iwl_mvm_vif { ...@@ -399,6 +406,9 @@ struct iwl_mvm_vif {
/* FW identified misbehaving AP */ /* FW identified misbehaving AP */
u8 uapsd_misbehaving_bssid[ETH_ALEN]; u8 uapsd_misbehaving_bssid[ETH_ALEN];
/* Indicates that CSA countdown may be started */
bool csa_countdown;
}; };
static inline struct iwl_mvm_vif * static inline struct iwl_mvm_vif *
...@@ -519,6 +529,13 @@ enum { ...@@ -519,6 +529,13 @@ enum {
#define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100 #define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100
#define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200 #define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200
enum iwl_mvm_tdls_cs_state {
IWL_MVM_TDLS_SW_IDLE = 0,
IWL_MVM_TDLS_SW_REQ_SENT,
IWL_MVM_TDLS_SW_REQ_RCVD,
IWL_MVM_TDLS_SW_ACTIVE,
};
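Read as the names suggest, the intended progression is: IDLE to REQ_SENT when we initiate a switch, IDLE to REQ_RCVD when the peer initiates one, either of those to ACTIVE while the switch is in progress, and back to IDLE when it completes or times out; the actual transitions live in the TDLS channel-switch code, which is not part of this hunk. A trivial debug helper one might write against this enum:

static const char *iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
	switch (state) {
	case IWL_MVM_TDLS_SW_IDLE:
		return "IDLE";
	case IWL_MVM_TDLS_SW_REQ_SENT:
		return "REQ SENT";
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		return "REQ RECEIVED";
	case IWL_MVM_TDLS_SW_ACTIVE:
		return "ACTIVE";
	}
	return "UNKNOWN";
}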
struct iwl_mvm { struct iwl_mvm {
/* for logger access */ /* for logger access */
struct device *dev; struct device *dev;
...@@ -578,6 +595,7 @@ struct iwl_mvm { ...@@ -578,6 +595,7 @@ struct iwl_mvm {
struct work_struct sta_drained_wk; struct work_struct sta_drained_wk;
unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
atomic_t pending_frames[IWL_MVM_STATION_COUNT]; atomic_t pending_frames[IWL_MVM_STATION_COUNT];
u32 tfd_drained[IWL_MVM_STATION_COUNT];
u8 rx_ba_sessions; u8 rx_ba_sessions;
/* configured by mac80211 */ /* configured by mac80211 */
...@@ -588,6 +606,10 @@ struct iwl_mvm { ...@@ -588,6 +606,10 @@ struct iwl_mvm {
void *scan_cmd; void *scan_cmd;
struct iwl_mcast_filter_cmd *mcast_filter_cmd; struct iwl_mcast_filter_cmd *mcast_filter_cmd;
/* UMAC scan tracking */
u32 scan_uid[IWL_MVM_MAX_SIMULTANEOUS_SCANS];
u8 scan_seq_num, sched_scan_seq_num;
/* rx chain antennas set through debugfs for the scan command */ /* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant; u8 scan_rx_ant;
...@@ -662,7 +684,8 @@ struct iwl_mvm { ...@@ -662,7 +684,8 @@ struct iwl_mvm {
/* sched scan settings for net detect */ /* sched scan settings for net detect */
struct cfg80211_sched_scan_request *nd_config; struct cfg80211_sched_scan_request *nd_config;
struct ieee80211_scan_ies *nd_ies; struct ieee80211_scan_ies nd_ies;
bool net_detect;
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
u32 d3_wake_sysassert; /* must be u32 for debugfs_create_bool */ u32 d3_wake_sysassert; /* must be u32 for debugfs_create_bool */
bool d3_test_active; bool d3_test_active;
...@@ -735,6 +758,28 @@ struct iwl_mvm { ...@@ -735,6 +758,28 @@ struct iwl_mvm {
u32 ap_last_beacon_gp2; u32 ap_last_beacon_gp2;
u8 low_latency_agg_frame_limit; u8 low_latency_agg_frame_limit;
/* TDLS channel switch data */
struct {
struct delayed_work dwork;
enum iwl_mvm_tdls_cs_state state;
/*
* Current cs sta - might be different from periodic cs peer
* station. Value is meaningless when the cs-state is idle.
*/
u8 cur_sta_id;
/* TDLS periodic channel-switch peer */
struct {
u8 sta_id;
u8 op_class;
bool initiator; /* are we the link initiator */
struct cfg80211_chan_def chandef;
struct sk_buff *skb; /* ch sw template */
u32 ch_sw_tm_ie;
} peer;
} tdls_cs;
}; };
/* Extract MVM priv from op_mode and _hw */ /* Extract MVM priv from op_mode and _hw */
...@@ -751,6 +796,7 @@ enum iwl_mvm_status { ...@@ -751,6 +796,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_IN_HW_RESTART, IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_IN_D0I3, IWL_MVM_STATUS_IN_D0I3,
IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_D3_RECONFIG,
}; };
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
...@@ -759,6 +805,26 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) ...@@ -759,6 +805,26 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
} }
/* Must be called with rcu_read_lock() held; the lock can only be
* released once the returned mvmsta is no longer needed.
*/
static inline struct iwl_mvm_sta *
iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id)
{
struct ieee80211_sta *sta;
if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
return NULL;
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
/* This can happen if the station has just been removed */
if (IS_ERR_OR_NULL(sta))
return NULL;
return iwl_mvm_sta_from_mac80211(sta);
}
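A minimal usage sketch for the RCU variant; the caller-side variables are illustrative, and anything read from the returned mvmsta is only valid inside the read-side critical section.

u32 tfd_queue_msk = 0;
struct iwl_mvm_sta *mvmsta;

rcu_read_lock();
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
if (mvmsta)
	tfd_queue_msk = mvmsta->tfd_queue_msk;
rcu_read_unlock();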
static inline struct iwl_mvm_sta * static inline struct iwl_mvm_sta *
iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id) iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
{ {
...@@ -832,6 +898,16 @@ int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, ...@@ -832,6 +898,16 @@ int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta); struct ieee80211_sta *sta);
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info, u8 sta_id);
void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct iwl_tx_cmd *tx_cmd,
struct sk_buff *skb_frag);
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc);
#ifdef CONFIG_IWLWIFI_DEBUG #ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status); const char *iwl_mvm_get_tx_fail_reason(u32 status);
#else #else
...@@ -888,6 +964,8 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, ...@@ -888,6 +964,8 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd); struct iwl_device_cmd *cmd);
int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd); struct iwl_device_cmd *cmd);
int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
/* MVM PHY */ /* MVM PHY */
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
...@@ -901,6 +979,8 @@ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, ...@@ -901,6 +979,8 @@ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt); struct iwl_mvm_phy_ctxt *ctxt);
int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm); int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
/* MAC (virtual interface) programming */ /* MAC (virtual interface) programming */
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
...@@ -920,6 +1000,8 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, ...@@ -920,6 +1000,8 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd); struct iwl_device_cmd *cmd);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif); struct ieee80211_vif *vif);
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *exclude_vif);
/* Bindings */ /* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
...@@ -930,6 +1012,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, ...@@ -930,6 +1012,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
struct ieee80211_vif *disabled_vif); struct ieee80211_vif *disabled_vif);
/* Scanning */ /* Scanning */
int iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_request(struct iwl_mvm *mvm, int iwl_mvm_scan_request(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req); struct cfg80211_scan_request *req);
...@@ -970,6 +1053,17 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, ...@@ -970,6 +1053,17 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req, struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies); struct ieee80211_scan_ies *ies);
/* UMAC scan */
int iwl_mvm_config_scan(struct iwl_mvm *mvm);
int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_scan_request *req);
int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies);
int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
/* MVM debugfs */ /* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
...@@ -1059,6 +1153,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, ...@@ -1059,6 +1153,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
bool iwl_mvm_ref_taken(struct iwl_mvm *mvm);
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq); void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm); int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
...@@ -1074,12 +1169,14 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, ...@@ -1074,12 +1169,14 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta); struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta); struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
enum ieee80211_band band); enum ieee80211_band band);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac); struct ieee80211_tx_info *info, u8 ac);
bool iwl_mvm_bt_coex_is_ant_avail_old(struct iwl_mvm *mvm, u8 ant);
bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm); bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm); void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm); int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
...@@ -1195,6 +1292,10 @@ bool iwl_mvm_is_idle(struct iwl_mvm *mvm); ...@@ -1195,6 +1292,10 @@ bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
/* Thermal management and CT-kill */ /* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm); void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm); void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
...@@ -1206,12 +1307,33 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -1206,12 +1307,33 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool added_vif); bool added_vif);
/* TDLS */ /* TDLS */
/*
* We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present.
* This TID is marked as used vs the AP and all connected TDLS peers.
*/
#define IWL_MVM_TDLS_FW_TID 4
int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm); void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm);
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool sta_added); bool sta_added);
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
struct ieee80211_vif *vif); struct ieee80211_vif *vif);
int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u8 oper_class,
struct cfg80211_chan_def *chandef,
struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_tdls_ch_sw_params *params);
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
......
...@@ -244,6 +244,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { ...@@ -244,6 +244,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_scan_offload_complete_notif, true), iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results, RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
false), false),
RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
true),
RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false), RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
...@@ -254,6 +256,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { ...@@ -254,6 +256,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false), RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
iwl_mvm_power_uapsd_misbehaving_ap_notif, false), iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
true),
RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
}; };
#undef RX_HANDLER #undef RX_HANDLER
#define CMD(x) [x] = #x #define CMD(x) [x] = #x
...@@ -344,6 +352,13 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { ...@@ -344,6 +352,13 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
CMD(ANTENNA_COUPLING_NOTIFICATION), CMD(ANTENNA_COUPLING_NOTIFICATION),
CMD(SCD_QUEUE_CFG), CMD(SCD_QUEUE_CFG),
CMD(SCAN_CFG_CMD),
CMD(SCAN_REQ_UMAC),
CMD(SCAN_ABORT_UMAC),
CMD(SCAN_COMPLETE_UMAC),
CMD(TDLS_CHANNEL_SWITCH_CMD),
CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
CMD(TDLS_CONFIG_CMD),
}; };
#undef CMD #undef CMD
...@@ -442,6 +457,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, ...@@ -442,6 +457,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk); INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
INIT_WORK(&mvm->fw_error_dump_wk, iwl_mvm_fw_error_dump_wk); INIT_WORK(&mvm->fw_error_dump_wk, iwl_mvm_fw_error_dump_wk);
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
spin_lock_init(&mvm->d0i3_tx_lock); spin_lock_init(&mvm->d0i3_tx_lock);
spin_lock_init(&mvm->refs_lock); spin_lock_init(&mvm->refs_lock);
...@@ -525,6 +541,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, ...@@ -525,6 +541,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
err = iwl_run_init_mvm_ucode(mvm, true); err = iwl_run_init_mvm_ucode(mvm, true);
if (!err || !iwlmvm_mod_params.init_dbg)
iwl_trans_stop_device(trans); iwl_trans_stop_device(trans);
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
/* returns 0 if successful, 1 if success but in rfkill */ /* returns 0 if successful, 1 if success but in rfkill */
...@@ -534,16 +551,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, ...@@ -534,16 +551,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
} }
} }
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) scan_size = iwl_mvm_scan_size(mvm);
scan_size = sizeof(struct iwl_scan_req_unified_lmac) +
sizeof(struct iwl_scan_channel_cfg_lmac) *
mvm->fw->ucode_capa.n_scan_channels +
sizeof(struct iwl_scan_probe_req);
else
scan_size = sizeof(struct iwl_scan_cmd) +
mvm->fw->ucode_capa.max_probe_length +
mvm->fw->ucode_capa.n_scan_channels *
sizeof(struct iwl_scan_channel);
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
if (!mvm->scan_cmd) if (!mvm->scan_cmd)
...@@ -597,8 +605,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) ...@@ -597,8 +605,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
kfree(mvm->nd_config->match_sets); kfree(mvm->nd_config->match_sets);
kfree(mvm->nd_config); kfree(mvm->nd_config);
mvm->nd_config = NULL; mvm->nd_config = NULL;
kfree(mvm->nd_ies);
mvm->nd_ies = NULL;
} }
#endif #endif
...@@ -1050,6 +1056,19 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) ...@@ -1050,6 +1056,19 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
synchronize_net(); synchronize_net();
/*
* iwl_mvm_ref_sync takes a reference before checking the flag.
* So by checking that no reference is held, we prevent a state
* in which iwl_mvm_ref_sync continues successfully while we
* configure the firmware to enter d0i3.
*/
if (iwl_mvm_ref_taken(mvm)) {
IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
wake_up(&mvm->d0i3_exit_waitq);
return 1;
}
ieee80211_iterate_active_interfaces_atomic(mvm->hw, ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_enter_d0i3_iterator, iwl_mvm_enter_d0i3_iterator,
......
...@@ -68,7 +68,7 @@ ...@@ -68,7 +68,7 @@
#include "mvm.h" #include "mvm.h"
/* Maps the driver specific channel width definition to the fw values */ /* Maps the driver specific channel width definition to the fw values */
static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
{ {
switch (chandef->width) { switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20_NOHT:
...@@ -90,7 +90,7 @@ static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) ...@@ -90,7 +90,7 @@ static inline u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
* Maps the driver specific control channel position (relative to the center * Maps the driver specific control channel position (relative to the center
* freq) definitions to the fw values * freq) definitions to the fw values
*/ */
static inline u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef) u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
{ {
switch (chandef->chan->center_freq - chandef->center_freq1) { switch (chandef->chan->center_freq - chandef->center_freq1) {
case -70: case -70:
......
...@@ -286,6 +286,27 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, ...@@ -286,6 +286,27 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
return true; return true;
} }
static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi)
{
int numerator;
int dtim_interval = dtimper * bi;
if (WARN_ON(!dtim_interval))
return 0;
if (dtimper == 1) {
if (bi > 100)
numerator = 408;
else
numerator = 510;
} else if (dtimper < 10) {
numerator = 612;
} else {
return 0;
}
return max(1, (numerator / dtim_interval));
}
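A few worked values for the new skip-over-DTIM helper above (beacon interval and DTIM period in TUs, results taken straight from the function's arithmetic):

/* Illustrative calls:
 *
 *   iwl_mvm_power_get_skip_over_dtim(1, 100);   dtim_interval = 100, numerator = 510 -> skip 5
 *   iwl_mvm_power_get_skip_over_dtim(1, 200);   dtim_interval = 200, numerator = 408 -> skip 2
 *   iwl_mvm_power_get_skip_over_dtim(3, 100);   dtim_interval = 300, numerator = 612 -> skip 2
 *   iwl_mvm_power_get_skip_over_dtim(10, 100);  DTIM period >= 10 -> 0, never skip
 *
 * i.e. the firmware is told to sleep through roughly 400-600 TUs worth of
 * DTIMs (clamped to at least one), and skipping is disabled entirely for
 * long DTIM periods.
 */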
static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
{ {
struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_chanctx_conf *chanctx_conf;
...@@ -308,7 +329,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, ...@@ -308,7 +329,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd) struct iwl_mac_power_cmd *cmd)
{ {
int dtimper, dtimper_msec; int dtimper, bi;
int keep_alive; int keep_alive;
bool radar_detect = false; bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused = struct iwl_mvm_vif *mvmvif __maybe_unused =
...@@ -317,6 +338,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, ...@@ -317,6 +338,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color)); mvmvif->color));
dtimper = vif->bss_conf.dtim_period; dtimper = vif->bss_conf.dtim_period;
bi = vif->bss_conf.beacon_int;
/* /*
* Regardless of power management state the driver must set * Regardless of power management state the driver must set
...@@ -324,10 +346,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, ...@@ -324,10 +346,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
* immediately after association. Check that keep alive period * immediately after association. Check that keep alive period
* is at least 3 * DTIM * is at least 3 * DTIM
*/ */
dtimper_msec = dtimper * vif->bss_conf.beacon_int; keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi),
keep_alive = max_t(int, 3 * dtimper_msec, USEC_PER_SEC);
MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC); keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
cmd->keep_alive_seconds = cpu_to_le16(keep_alive); cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
if (mvm->ps_disabled) if (mvm->ps_disabled)
...@@ -352,11 +373,14 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, ...@@ -352,11 +373,14 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
radar_detect = iwl_mvm_power_is_radar(vif); radar_detect = iwl_mvm_power_is_radar(vif);
/* Check skip over DTIM conditions */ /* Check skip over DTIM conditions */
if (!radar_detect && (dtimper <= 10) && if (!radar_detect && (dtimper < 10) &&
(iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP || (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
mvm->cur_ucode == IWL_UCODE_WOWLAN)) { mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); cmd->skip_dtim_periods =
cmd->skip_dtim_periods = 3; iwl_mvm_power_get_skip_over_dtim(dtimper, bi);
if (cmd->skip_dtim_periods)
cmd->flags |=
cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
} }
if (mvm->cur_ucode != IWL_UCODE_WOWLAN) { if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
......
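On the keep-alive change in the power.c hunk above: the period is now derived from the real beacon timing rather than treating TUs as milliseconds. A quick worked example, relying on ieee80211_tu_to_usec() being a multiply by 1024 (1 TU = 1024 us):

/* dtimper = 3, bi = 100 TU:
 *   3 * dtimper * bi                   = 900 TU
 *   ieee80211_tu_to_usec(900)          = 921600 us
 *   DIV_ROUND_UP(921600, USEC_PER_SEC) = 1 s
 *   keep_alive = max(1, POWER_KEEP_ALIVE_PERIOD_SEC)  -> the fixed floor wins
 *
 * dtimper = 5, bi = 1000 TU (a long-DTIM setup):
 *   3 * 5 * 1000                       = 15000 TU
 *   ieee80211_tu_to_usec(15000)        = 15360000 us -> DIV_ROUND_UP = 16 s
 *   keep_alive = max(16, POWER_KEEP_ALIVE_PERIOD_SEC)
 *
 * So only unusually long DTIM intervals can push keep_alive above the floor.
 */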
...@@ -158,6 +158,12 @@ struct rs_tx_column { ...@@ -158,6 +158,12 @@ struct rs_tx_column {
allow_column_func_t checks[MAX_COLUMN_CHECKS]; allow_column_func_t checks[MAX_COLUMN_CHECKS];
}; };
static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl)
{
return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant);
}
static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl) struct iwl_scale_tbl_info *tbl)
{ {
...@@ -218,6 +224,9 @@ static const struct rs_tx_column rs_tx_columns[] = { ...@@ -218,6 +224,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_INVALID, RS_COLUMN_INVALID,
RS_COLUMN_INVALID, RS_COLUMN_INVALID,
}, },
.checks = {
rs_ant_allow,
},
}, },
[RS_COLUMN_LEGACY_ANT_B] = { [RS_COLUMN_LEGACY_ANT_B] = {
.mode = RS_LEGACY, .mode = RS_LEGACY,
...@@ -231,6 +240,9 @@ static const struct rs_tx_column rs_tx_columns[] = { ...@@ -231,6 +240,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_INVALID, RS_COLUMN_INVALID,
RS_COLUMN_INVALID, RS_COLUMN_INVALID,
}, },
.checks = {
rs_ant_allow,
},
}, },
[RS_COLUMN_SISO_ANT_A] = { [RS_COLUMN_SISO_ANT_A] = {
.mode = RS_SISO, .mode = RS_SISO,
...@@ -246,6 +258,7 @@ static const struct rs_tx_column rs_tx_columns[] = { ...@@ -246,6 +258,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
}, },
.checks = { .checks = {
rs_siso_allow, rs_siso_allow,
rs_ant_allow,
}, },
}, },
[RS_COLUMN_SISO_ANT_B] = { [RS_COLUMN_SISO_ANT_B] = {
...@@ -262,6 +275,7 @@ static const struct rs_tx_column rs_tx_columns[] = { ...@@ -262,6 +275,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
}, },
.checks = { .checks = {
rs_siso_allow, rs_siso_allow,
rs_ant_allow,
}, },
}, },
[RS_COLUMN_SISO_ANT_A_SGI] = { [RS_COLUMN_SISO_ANT_A_SGI] = {
...@@ -279,6 +293,7 @@ static const struct rs_tx_column rs_tx_columns[] = { ...@@ -279,6 +293,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
}, },
.checks = { .checks = {
rs_siso_allow, rs_siso_allow,
rs_ant_allow,
rs_sgi_allow, rs_sgi_allow,
}, },
}, },
...@@ -297,6 +312,7 @@ static const struct rs_tx_column rs_tx_columns[] = { ...@@ -297,6 +312,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
}, },
.checks = { .checks = {
rs_siso_allow, rs_siso_allow,
rs_ant_allow,
rs_sgi_allow, rs_sgi_allow,
}, },
}, },
...@@ -506,7 +522,7 @@ static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate, ...@@ -506,7 +522,7 @@ static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
const char *prefix) const char *prefix)
{ {
IWL_DEBUG_RATE(mvm, IWL_DEBUG_RATE(mvm,
"%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d STBC %d\n", "%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
prefix, rs_pretty_lq_type(rate->type), prefix, rs_pretty_lq_type(rate->type),
rate->index, rs_pretty_ant(rate->ant), rate->index, rs_pretty_ant(rate->ant),
rate->bw, rate->sgi, rate->ldpc, rate->stbc); rate->bw, rate->sgi, rate->ldpc, rate->stbc);
...@@ -816,7 +832,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, ...@@ -816,7 +832,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
if (nss == 1) { if (nss == 1) {
rate->type = LQ_VHT_SISO; rate->type = LQ_VHT_SISO;
WARN_ON_ONCE(num_of_ant != 1); WARN_ON_ONCE(!rate->stbc && num_of_ant != 1);
} else if (nss == 2) { } else if (nss == 2) {
rate->type = LQ_VHT_MIMO2; rate->type = LQ_VHT_MIMO2;
WARN_ON_ONCE(num_of_ant != 2); WARN_ON_ONCE(num_of_ant != 2);
...@@ -1110,10 +1126,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, ...@@ -1110,10 +1126,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (time_after(jiffies, if (time_after(jiffies,
(unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) { (unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) {
int tid; int t;
IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) for (t = 0; t < IWL_MAX_TID_COUNT; t++)
ieee80211_stop_tx_ba_session(sta, tid); ieee80211_stop_tx_ba_session(sta, t);
iwl_mvm_rs_rate_init(mvm, sta, info->band, false); iwl_mvm_rs_rate_init(mvm, sta, info->band, false);
return; return;
...@@ -1154,16 +1171,15 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, ...@@ -1154,16 +1171,15 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* Rate did match, so reset the missed_rate_counter */ /* Rate did match, so reset the missed_rate_counter */
lq_sta->missed_rate_counter = 0; lq_sta->missed_rate_counter = 0;
/* Figure out if rate scale algorithm is in active or search table */ if (!lq_sta->search_better_tbl) {
if (rs_rate_match(&rate,
&(lq_sta->lq_info[lq_sta->active_tbl].rate))) {
curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
} else if (rs_rate_match(&rate, } else {
&lq_sta->lq_info[1 - lq_sta->active_tbl].rate)) {
curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
} else { }
if (WARN_ON_ONCE(!rs_rate_match(&rate, &curr_tbl->rate))) {
IWL_DEBUG_RATE(mvm, IWL_DEBUG_RATE(mvm,
"Neither active nor search matches tx rate\n"); "Neither active nor search matches tx rate\n");
tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
...@@ -1188,6 +1204,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, ...@@ -1188,6 +1204,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* first index into rate scale table. * first index into rate scale table.
*/ */
if (info->flags & IEEE80211_TX_STAT_AMPDU) { if (info->flags & IEEE80211_TX_STAT_AMPDU) {
/* ampdu_ack_len == 0 means no BA was received at all. Treat it as a
* single frame loss, so the success ratio doesn't dip too sharply
* just because one BA went missing
*/
if (info->status.ampdu_ack_len == 0)
info->status.ampdu_len = 1;
ucode_rate = le32_to_cpu(table->rs_table[0]); ucode_rate = le32_to_cpu(table->rs_table[0]);
rs_rate_from_ucode_rate(ucode_rate, info->band, &rate); rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
rs_collect_tx_data(lq_sta, curr_tbl, rate.index, rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
......
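The rs.c change that clamps ampdu_len to 1 when no BlockAck arrived is easiest to see with numbers; the percentages below are a simplified view of the success-ratio window, not the exact rs bookkeeping:

/* Suppose the window holds 90 successes out of 100 attempts (90%) and a
 * 40-frame A-MPDU gets no BlockAck at all:
 *
 *   counting all 40 as losses:  90 / 140  ~= 64% success
 *   counting a single loss:     90 / 101  ~= 89% success
 *
 * The single-loss accounting keeps one missing BA (which may just be a
 * lost BA frame, not 40 lost MPDUs) from dragging the rate downwards.
 */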
...@@ -96,27 +96,27 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, ...@@ -96,27 +96,27 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
* Adds the rxb to a new skb and give it to mac80211 * Adds the rxb to a new skb and give it to mac80211
*/ */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct sk_buff *skb,
struct ieee80211_hdr *hdr, u16 len, struct ieee80211_hdr *hdr, u16 len,
u32 ampdu_status, u32 ampdu_status, u8 crypt_len,
struct iwl_rx_cmd_buffer *rxb, struct iwl_rx_cmd_buffer *rxb)
struct ieee80211_rx_status *stats)
{ {
struct sk_buff *skb;
unsigned int hdrlen, fraglen; unsigned int hdrlen, fraglen;
/* Dont use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr pulled.
*/
skb = alloc_skb(128, GFP_ATOMIC);
if (!skb) {
IWL_ERR(mvm, "alloc_skb failed\n");
return;
}
/* If frame is small enough to fit in skb->head, pull it completely. /* If frame is small enough to fit in skb->head, pull it completely.
* If not, only pull ieee80211_hdr so that splice() or TCP coalesce * If not, only pull ieee80211_hdr (including crypto if present, and
* are more efficient. * an additional 8 bytes for SNAP/ethertype, see below) so that
* splice() or TCP coalesce are more efficient.
*
* Since, in addition, ieee80211_data_to_8023() always pulls in at
* least 8 bytes (possibly more for mesh) we can do the same here
* to save the cost of doing it later. That still doesn't pull in
* the actual IP header since the typical case has a SNAP header.
* If the latter changes (there are efforts in the standards group
* to do so) we should revisit this and ieee80211_data_to_8023().
*/ */
hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr); hdrlen = (len <= skb_tailroom(skb)) ? len :
sizeof(*hdr) + crypt_len + 8;
memcpy(skb_put(skb, hdrlen), hdr, hdrlen); memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
fraglen = len - hdrlen; fraglen = len - hdrlen;
...@@ -129,8 +129,6 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, ...@@ -129,8 +129,6 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
fraglen, rxb->truesize); fraglen, rxb->truesize);
} }
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(mvm->hw, skb); ieee80211_rx(mvm->hw, skb);
} }
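The reworked RX path above decides how much of the frame to copy into the skb's linear area. A small hypothetical helper restating that decision, with assumed sizes for a CCMP-protected QoS data frame (26-byte 802.11 header, 8-byte CCMP header):

/* Sketch only; iwl_mvm_pass_packet_to_mac80211() does this inline. */
static unsigned int example_linear_len(unsigned int frame_len,
                                       unsigned int tailroom,
                                       unsigned int hdr_len,   /* e.g. 26 */
                                       unsigned int crypt_len) /* e.g. 8 */
{
        if (frame_len <= tailroom)
                return frame_len;            /* small frame: pull it all */
        /* big frame: header + crypto + 8 bytes of SNAP/ethertype, so the
         * later ieee80211_data_to_8023() conversion never has to touch
         * the paged fragment; 26 + 8 + 8 = 42 bytes for this example.
         */
        return hdr_len + crypt_len + 8;
}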
...@@ -185,7 +183,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, ...@@ -185,7 +183,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
struct ieee80211_hdr *hdr, struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *stats, struct ieee80211_rx_status *stats,
u32 rx_pkt_status) u32 rx_pkt_status,
u8 *crypt_len)
{ {
if (!ieee80211_has_protected(hdr->frame_control) || if (!ieee80211_has_protected(hdr->frame_control) ||
(rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
...@@ -205,12 +204,14 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, ...@@ -205,12 +204,14 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
stats->flag |= RX_FLAG_DECRYPTED; stats->flag |= RX_FLAG_DECRYPTED;
IWL_DEBUG_WEP(mvm, "hw decrypted CCMP successfully\n"); IWL_DEBUG_WEP(mvm, "hw decrypted CCMP successfully\n");
*crypt_len = IEEE80211_CCMP_HDR_LEN;
return 0; return 0;
case RX_MPDU_RES_STATUS_SEC_TKIP_ENC: case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
/* Don't drop the frame and decrypt it in SW */ /* Don't drop the frame and decrypt it in SW */
if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
return 0; return 0;
*crypt_len = IEEE80211_TKIP_IV_LEN;
/* fall through if TTAK OK */ /* fall through if TTAK OK */
case RX_MPDU_RES_STATUS_SEC_WEP_ENC: case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
...@@ -218,6 +219,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, ...@@ -218,6 +219,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
return -1; return -1;
stats->flag |= RX_FLAG_DECRYPTED; stats->flag |= RX_FLAG_DECRYPTED;
if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
RX_MPDU_RES_STATUS_SEC_WEP_ENC)
*crypt_len = IEEE80211_WEP_IV_LEN;
return 0; return 0;
case RX_MPDU_RES_STATUS_SEC_EXT_ENC: case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
...@@ -242,15 +246,17 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, ...@@ -242,15 +246,17 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd) struct iwl_device_cmd *cmd)
{ {
struct ieee80211_hdr *hdr; struct ieee80211_hdr *hdr;
struct ieee80211_rx_status rx_status = {}; struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_info *phy_info; struct iwl_rx_phy_info *phy_info;
struct iwl_rx_mpdu_res_start *rx_res; struct iwl_rx_mpdu_res_start *rx_res;
struct ieee80211_sta *sta; struct ieee80211_sta *sta;
struct sk_buff *skb;
u32 len; u32 len;
u32 ampdu_status; u32 ampdu_status;
u32 rate_n_flags; u32 rate_n_flags;
u32 rx_pkt_status; u32 rx_pkt_status;
u8 crypt_len = 0;
phy_info = &mvm->last_phy_info; phy_info = &mvm->last_phy_info;
rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
...@@ -259,20 +265,32 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, ...@@ -259,20 +265,32 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
rx_pkt_status = le32_to_cpup((__le32 *) rx_pkt_status = le32_to_cpup((__le32 *)
(pkt->data + sizeof(*rx_res) + len)); (pkt->data + sizeof(*rx_res) + len));
memset(&rx_status, 0, sizeof(rx_status)); /* Don't use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr is pulled.
*/
skb = alloc_skb(128, GFP_ATOMIC);
if (!skb) {
IWL_ERR(mvm, "alloc_skb failed\n");
return 0;
}
rx_status = IEEE80211_SKB_RXCB(skb);
/* /*
* drop the packet if it has failed being decrypted by HW * drop the packet if it has failed being decrypted by HW
*/ */
if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, &rx_status, rx_pkt_status)) { if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
&crypt_len)) {
IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
rx_pkt_status); rx_pkt_status);
kfree_skb(skb);
return 0; return 0;
} }
if ((unlikely(phy_info->cfg_phy_cnt > 20))) { if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n", IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
phy_info->cfg_phy_cnt); phy_info->cfg_phy_cnt);
kfree_skb(skb);
return 0; return 0;
} }
...@@ -283,31 +301,31 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, ...@@ -283,31 +301,31 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) || if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
!(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) { !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status); IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
} }
/* This will be used in several places later */ /* This will be used in several places later */
rate_n_flags = le32_to_cpu(phy_info->rate_n_flags); rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
/* rx_status carries information about the packet to mac80211 */ /* rx_status carries information about the packet to mac80211 */
rx_status.mactime = le64_to_cpu(phy_info->timestamp); rx_status->mactime = le64_to_cpu(phy_info->timestamp);
rx_status.device_timestamp = le32_to_cpu(phy_info->system_timestamp); rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
rx_status.band = rx_status->band =
(phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
rx_status.freq = rx_status->freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
rx_status.band); rx_status->band);
/* /*
* TSF as indicated by the fw is at INA time, but mac80211 expects the * TSF as indicated by the fw is at INA time, but mac80211 expects the
* TSF at the beginning of the MPDU. * TSF at the beginning of the MPDU.
*/ */
/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/
iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status); iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal, IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal,
(unsigned long long)rx_status.mactime); (unsigned long long)rx_status->mactime);
rcu_read_lock(); rcu_read_lock();
/* /*
...@@ -326,15 +344,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, ...@@ -326,15 +344,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
if (sta) { if (sta) {
struct iwl_mvm_sta *mvmsta; struct iwl_mvm_sta *mvmsta;
mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmsta = iwl_mvm_sta_from_mac80211(sta);
rs_update_last_rssi(mvm, &mvmsta->lq_sta, rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
&rx_status);
} }
rcu_read_unlock(); rcu_read_unlock();
/* set the preamble flag if appropriate */ /* set the preamble flag if appropriate */
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
rx_status.flag |= RX_FLAG_SHORTPRE; rx_status->flag |= RX_FLAG_SHORTPRE;
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
/* /*
...@@ -342,8 +359,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, ...@@ -342,8 +359,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
* together since we get a single PHY response * together since we get a single PHY response
* from the firmware for all of them * from the firmware for all of them
*/ */
rx_status.flag |= RX_FLAG_AMPDU_DETAILS; rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
rx_status.ampdu_reference = mvm->ampdu_ref; rx_status->ampdu_reference = mvm->ampdu_ref;
} }
/* Set up the HT phy flags */ /* Set up the HT phy flags */
...@@ -351,50 +368,50 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, ...@@ -351,50 +368,50 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
case RATE_MCS_CHAN_WIDTH_20: case RATE_MCS_CHAN_WIDTH_20:
break; break;
case RATE_MCS_CHAN_WIDTH_40: case RATE_MCS_CHAN_WIDTH_40:
rx_status.flag |= RX_FLAG_40MHZ; rx_status->flag |= RX_FLAG_40MHZ;
break; break;
case RATE_MCS_CHAN_WIDTH_80: case RATE_MCS_CHAN_WIDTH_80:
rx_status.vht_flag |= RX_VHT_FLAG_80MHZ; rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
break; break;
case RATE_MCS_CHAN_WIDTH_160: case RATE_MCS_CHAN_WIDTH_160:
rx_status.vht_flag |= RX_VHT_FLAG_160MHZ; rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
break; break;
} }
if (rate_n_flags & RATE_MCS_SGI_MSK) if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI; rx_status->flag |= RX_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK) if (rate_n_flags & RATE_HT_MCS_GF_MSK)
rx_status.flag |= RX_FLAG_HT_GF; rx_status->flag |= RX_FLAG_HT_GF;
if (rate_n_flags & RATE_MCS_LDPC_MSK) if (rate_n_flags & RATE_MCS_LDPC_MSK)
rx_status.flag |= RX_FLAG_LDPC; rx_status->flag |= RX_FLAG_LDPC;
if (rate_n_flags & RATE_MCS_HT_MSK) { if (rate_n_flags & RATE_MCS_HT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >> u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
RATE_MCS_STBC_POS; RATE_MCS_STBC_POS;
rx_status.flag |= RX_FLAG_HT; rx_status->flag |= RX_FLAG_HT;
rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT; rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
} else if (rate_n_flags & RATE_MCS_VHT_MSK) { } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >> u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
RATE_MCS_STBC_POS; RATE_MCS_STBC_POS;
rx_status.vht_nss = rx_status->vht_nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1; RATE_VHT_MCS_NSS_POS) + 1;
rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
rx_status.flag |= RX_FLAG_VHT; rx_status->flag |= RX_FLAG_VHT;
rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT; rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK) if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status.vht_flag |= RX_VHT_FLAG_BF; rx_status->vht_flag |= RX_VHT_FLAG_BF;
} else { } else {
rx_status.rate_idx = rx_status->rate_idx =
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status.band); rx_status->band);
} }
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_update_frame_stats(mvm, &mvm->drv_rx_stats, rate_n_flags, iwl_mvm_update_frame_stats(mvm, &mvm->drv_rx_stats, rate_n_flags,
rx_status.flag & RX_FLAG_AMPDU_DETAILS); rx_status->flag & RX_FLAG_AMPDU_DETAILS);
#endif #endif
iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status, iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
rxb, &rx_status); crypt_len, rxb);
return 0; return 0;
} }
...@@ -500,29 +517,8 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, ...@@ -500,29 +517,8 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
.mvm = mvm, .mvm = mvm,
}; };
/* iwl_mvm_tt_temp_changed(mvm, le32_to_cpu(common->temperature));
* set temperature debug enabled - ignore FW temperature updates
* and use the user set temperature.
*/
if (mvm->temperature_test) {
if (mvm->temperature < le32_to_cpu(common->temperature))
IWL_DEBUG_TEMP(mvm,
"Ignoring FW temperature update that is greater than the debug set temperature (debug temp = %d, fw temp = %d)\n",
mvm->temperature,
le32_to_cpu(common->temperature));
/*
* skip iwl_mvm_tt_handler since we are in
* temperature debug mode and we are ignoring
* the new temperature value
*/
goto update;
}
if (mvm->temperature != le32_to_cpu(common->temperature)) {
mvm->temperature = le32_to_cpu(common->temperature);
iwl_mvm_tt_handler(mvm);
}
update:
iwl_mvm_update_rx_statistics(mvm, stats); iwl_mvm_update_rx_statistics(mvm, stats);
ieee80211_iterate_active_interfaces(mvm->hw, ieee80211_iterate_active_interfaces(mvm->hw,
......
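For reference, the crypt_len values that iwl_mvm_set_mac80211_rx_flag() now reports back come from the generic definitions in include/linux/ieee80211.h:

/* Cipher            constant                      bytes
 * CCMP              IEEE80211_CCMP_HDR_LEN          8
 * TKIP              IEEE80211_TKIP_IV_LEN           8
 * WEP               IEEE80211_WEP_IV_LEN            4
 * open / EXT_ENC    (crypt_len left at 0)
 */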
...@@ -83,15 +83,29 @@ struct iwl_mvm_scan_params { ...@@ -83,15 +83,29 @@ struct iwl_mvm_scan_params {
} dwell[IEEE80211_NUM_BANDS]; } dwell[IEEE80211_NUM_BANDS];
}; };
enum iwl_umac_scan_uid_type {
IWL_UMAC_SCAN_UID_REG_SCAN = BIT(0),
IWL_UMAC_SCAN_UID_SCHED_SCAN = BIT(1),
IWL_UMAC_SCAN_UID_ALL = IWL_UMAC_SCAN_UID_REG_SCAN |
IWL_UMAC_SCAN_UID_SCHED_SCAN,
};
static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
enum iwl_umac_scan_uid_type type, bool notify);
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
{
if (mvm->scan_rx_ant != ANT_NONE)
return mvm->scan_rx_ant;
return mvm->fw->valid_rx_ant;
}
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{ {
u16 rx_chain; u16 rx_chain;
u8 rx_ant; u8 rx_ant;
if (mvm->scan_rx_ant != ANT_NONE) rx_ant = iwl_mvm_scan_rx_ant(mvm);
rx_ant = mvm->scan_rx_ant;
else
rx_ant = mvm->fw->valid_rx_ant;
rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS; rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS; rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS; rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
...@@ -366,6 +380,10 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm, ...@@ -366,6 +380,10 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
!is_sched_scan) !is_sched_scan)
max_probe_len -= 32; max_probe_len -= 32;
/* A DS Parameter Set element is added on the 2.4 GHz band if required */
if (iwl_mvm_rrm_scan_needed(mvm))
max_probe_len -= 3;
return max_probe_len; return max_probe_len;
} }
...@@ -537,23 +555,17 @@ int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm, ...@@ -537,23 +555,17 @@ int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd) struct iwl_device_cmd *cmd)
{ {
struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_packet *pkt = rxb_addr(rxb);
u8 client_bitmap = 0;
if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) { if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) &&
!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
struct iwl_sched_scan_results *notif = (void *)pkt->data; struct iwl_sched_scan_results *notif = (void *)pkt->data;
client_bitmap = notif->client_bitmap; if (!(notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN))
return 0;
} }
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
client_bitmap & SCAN_CLIENT_SCHED_SCAN) {
if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
ieee80211_sched_scan_results(mvm->hw); ieee80211_sched_scan_results(mvm->hw);
} else {
IWL_DEBUG_SCAN(mvm, "Scan results\n");
}
}
return 0; return 0;
} }
...@@ -965,6 +977,20 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, ...@@ -965,6 +977,20 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
return ret; return ret;
} }
static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req)
{
if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
IWL_DEBUG_SCAN(mvm,
"Sending scheduled scan with filtering, n_match_sets %d\n",
req->n_match_sets);
return false;
}
IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
return true;
}
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req) struct cfg80211_sched_scan_request *req)
{ {
...@@ -980,15 +1006,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, ...@@ -980,15 +1006,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
.schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER, .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
}; };
if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) { if (iwl_mvm_scan_pass_all(mvm, req))
IWL_DEBUG_SCAN(mvm,
"Sending scheduled scan with filtering, filter len %d\n",
req->n_match_sets);
} else {
IWL_DEBUG_SCAN(mvm,
"Sending Scheduled scan without filtering\n");
scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL); scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
}
if (mvm->last_ebs_successful && if (mvm->last_ebs_successful &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
...@@ -1006,12 +1025,19 @@ int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm, ...@@ -1006,12 +1025,19 @@ int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
{ {
int ret; int ret;
if ((mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) { if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
if (ret)
return ret;
ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
} else if ((mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
mvm->scan_status = IWL_MVM_SCAN_SCHED;
ret = iwl_mvm_config_sched_scan_profiles(mvm, req); ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
if (ret) if (ret)
return ret; return ret;
ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies); ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
} else { } else {
mvm->scan_status = IWL_MVM_SCAN_SCHED;
ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies); ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
if (ret) if (ret)
return ret; return ret;
...@@ -1068,6 +1094,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) ...@@ -1068,6 +1094,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
notify);
if (mvm->scan_status != IWL_MVM_SCAN_SCHED && if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
(!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
mvm->scan_status != IWL_MVM_SCAN_OS)) { mvm->scan_status != IWL_MVM_SCAN_OS)) {
...@@ -1155,20 +1185,64 @@ iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm, ...@@ -1155,20 +1185,64 @@ iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
} }
} }
static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
size_t len, u8 *const pos)
{
static const u8 before_ds_params[] = {
WLAN_EID_SSID,
WLAN_EID_SUPP_RATES,
WLAN_EID_REQUEST,
WLAN_EID_EXT_SUPP_RATES,
};
size_t offs;
u8 *newpos = pos;
if (!iwl_mvm_rrm_scan_needed(mvm)) {
memcpy(newpos, ies, len);
return newpos + len;
}
offs = ieee80211_ie_split(ies, len,
before_ds_params,
ARRAY_SIZE(before_ds_params),
0);
memcpy(newpos, ies, offs);
newpos += offs;
/* Add a placeholder for DS Parameter Set element */
*newpos++ = WLAN_EID_DS_PARAMS;
*newpos++ = 1;
*newpos++ = 0;
memcpy(newpos, ies + offs, len - offs);
newpos += len - offs;
return newpos;
}
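With RRM scanning enabled, the helper above splits the caller's 2.4 GHz IEs so that a DS Parameter Set placeholder lands in its standards-mandated position. An illustration with hypothetical IE contents:

/* input:   [SSID][Supp Rates][Ext Supp Rates][Vendor ...]
 * output:  [SSID][Supp Rates][Ext Supp Rates][DS Params: 03 01 00][Vendor ...]
 *
 * The three placeholder bytes are EID = WLAN_EID_DS_PARAMS (3), length = 1,
 * channel = 0; the zero channel is presumably patched in by the firmware
 * per scanned channel when the probe request is transmitted.
 */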
static void static void
iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_scan_ies *ies, struct ieee80211_scan_ies *ies,
struct iwl_scan_req_unified_lmac *cmd) struct iwl_scan_probe_req *preq,
const u8 *mac_addr, const u8 *mac_addr_mask)
{ {
struct iwl_scan_probe_req *preq = (void *)(cmd->data +
sizeof(struct iwl_scan_channel_cfg_lmac) *
mvm->fw->ucode_capa.n_scan_channels);
struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf; struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf;
u8 *pos; u8 *pos, *newpos;
/*
* Unfortunately, right now the offload scan doesn't support MAC address
* randomisation within the firmware, so until the firmware API is ready
* we implement it in the driver. This means the address won't really
* change between scan iterations, only when the scan is restarted, but
* at least that helps a bit.
*/
if (mac_addr)
get_random_mask_addr(frame->sa, mac_addr, mac_addr_mask);
else
memcpy(frame->sa, vif->addr, ETH_ALEN);
frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
eth_broadcast_addr(frame->da); eth_broadcast_addr(frame->da);
memcpy(frame->sa, vif->addr, ETH_ALEN);
eth_broadcast_addr(frame->bssid); eth_broadcast_addr(frame->bssid);
frame->seq_ctrl = 0; frame->seq_ctrl = 0;
...@@ -1179,11 +1253,14 @@ iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -1179,11 +1253,14 @@ iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
preq->mac_header.offset = 0; preq->mac_header.offset = 0;
preq->mac_header.len = cpu_to_le16(24 + 2); preq->mac_header.len = cpu_to_le16(24 + 2);
memcpy(pos, ies->ies[IEEE80211_BAND_2GHZ], /* Insert ds parameter set element on 2.4 GHz band */
ies->len[IEEE80211_BAND_2GHZ]); newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
ies->ies[IEEE80211_BAND_2GHZ],
ies->len[IEEE80211_BAND_2GHZ],
pos);
preq->band_data[0].offset = cpu_to_le16(pos - preq->buf); preq->band_data[0].offset = cpu_to_le16(pos - preq->buf);
preq->band_data[0].len = cpu_to_le16(ies->len[IEEE80211_BAND_2GHZ]); preq->band_data[0].len = cpu_to_le16(newpos - pos);
pos += ies->len[IEEE80211_BAND_2GHZ]; pos = newpos;
memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ], memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
ies->len[IEEE80211_BAND_5GHZ]); ies->len[IEEE80211_BAND_5GHZ]);
...@@ -1244,9 +1321,10 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, ...@@ -1244,9 +1321,10 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, }, .dataflags = { IWL_HCMD_DFL_NOCOPY, },
}; };
struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd; struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
struct iwl_scan_probe_req *preq;
struct iwl_mvm_scan_params params = {}; struct iwl_mvm_scan_params params = {};
u32 flags; u32 flags;
int ssid_bitmap = 0; u32 ssid_bitmap = 0;
int ret, i; int ret, i;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
...@@ -1305,7 +1383,13 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, ...@@ -1305,7 +1383,13 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
req->req.n_channels, ssid_bitmap, req->req.n_channels, ssid_bitmap,
cmd); cmd);
iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, cmd); preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
mvm->fw->ucode_capa.n_scan_channels);
iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, preq,
req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
req->req.mac_addr : NULL,
req->req.mac_addr_mask);
ret = iwl_mvm_send_cmd(mvm, &hcmd); ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) { if (!ret) {
...@@ -1338,6 +1422,7 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, ...@@ -1338,6 +1422,7 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, }, .dataflags = { IWL_HCMD_DFL_NOCOPY, },
}; };
struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd; struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
struct iwl_scan_probe_req *preq;
struct iwl_mvm_scan_params params = {}; struct iwl_mvm_scan_params params = {};
int ret; int ret;
u32 flags = 0, ssid_bitmap = 0; u32 flags = 0, ssid_bitmap = 0;
...@@ -1361,15 +1446,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, ...@@ -1361,15 +1446,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
cmd->n_channels = (u8)req->n_channels; cmd->n_channels = (u8)req->n_channels;
if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) { if (iwl_mvm_scan_pass_all(mvm, req))
IWL_DEBUG_SCAN(mvm,
"Sending scheduled scan with filtering, n_match_sets %d\n",
req->n_match_sets);
} else {
IWL_DEBUG_SCAN(mvm,
"Sending Scheduled scan without filtering\n");
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
}
if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0) if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
...@@ -1399,7 +1477,13 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, ...@@ -1399,7 +1477,13 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
ssid_bitmap, cmd); ssid_bitmap, cmd);
iwl_mvm_build_unified_scan_probe(mvm, vif, ies, cmd); preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
mvm->fw->ucode_capa.n_scan_channels);
iwl_mvm_build_unified_scan_probe(mvm, vif, ies, preq,
req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
req->mac_addr : NULL,
req->mac_addr_mask);
ret = iwl_mvm_send_cmd(mvm, &hcmd); ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) { if (!ret) {
...@@ -1421,6 +1505,10 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, ...@@ -1421,6 +1505,10 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
{ {
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN,
true);
if (mvm->scan_status == IWL_MVM_SCAN_NONE) if (mvm->scan_status == IWL_MVM_SCAN_NONE)
return 0; return 0;
...@@ -1435,3 +1523,576 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) ...@@ -1435,3 +1523,576 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
return iwl_mvm_scan_offload_stop(mvm, true); return iwl_mvm_scan_offload_stop(mvm, true);
return iwl_mvm_cancel_regular_scan(mvm); return iwl_mvm_cancel_regular_scan(mvm);
} }
/* UMAC scan API */
struct iwl_umac_scan_done {
struct iwl_mvm *mvm;
enum iwl_umac_scan_uid_type type;
};
static int rate_to_scan_rate_flag(unsigned int rate)
{
static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
[IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
[IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
[IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
[IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
[IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
[IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
[IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
[IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
[IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
[IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
[IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
[IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,
};
return rate_to_scan_rate[rate];
}
static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
{
struct ieee80211_supported_band *band;
unsigned int rates = 0;
int i;
band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
for (i = 0; i < band->n_bitrates; i++)
rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
for (i = 0; i < band->n_bitrates; i++)
rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
/* Set both basic rates and supported rates */
rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
return cpu_to_le32(rates);
}
int iwl_mvm_config_scan(struct iwl_mvm *mvm)
{
struct iwl_scan_config *scan_config;
struct ieee80211_supported_band *band;
int num_channels =
mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
int ret, i, j = 0, cmd_size, data_size;
struct iwl_host_cmd cmd = {
.id = SCAN_CFG_CMD,
};
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS;
cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
scan_config = kzalloc(cmd_size, GFP_KERNEL);
if (!scan_config)
return -ENOMEM;
data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
scan_config->hdr.size = cpu_to_le16(data_size);
scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
SCAN_CONFIG_FLAG_SET_TX_CHAINS |
SCAN_CONFIG_FLAG_SET_RX_CHAINS |
SCAN_CONFIG_FLAG_SET_ALL_TIMES |
SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
SCAN_CONFIG_FLAG_SET_MAC_ADDR |
SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
SCAN_CONFIG_N_CHANNELS(num_channels));
scan_config->tx_chains = cpu_to_le32(mvm->fw->valid_tx_ant);
scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
scan_config->out_of_channel_time = cpu_to_le32(170);
scan_config->suspend_time = cpu_to_le32(30);
scan_config->dwell_active = 20;
scan_config->dwell_passive = 110;
scan_config->dwell_fragmented = 20;
memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
IWL_CHANNEL_FLAG_ACCURATE_EBS |
IWL_CHANNEL_FLAG_EBS_ADD |
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
for (i = 0; i < band->n_channels; i++, j++)
scan_config->channel_array[j] = band->channels[i].center_freq;
band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
for (i = 0; i < band->n_channels; i++, j++)
scan_config->channel_array[j] = band->channels[i].center_freq;
cmd.data[0] = scan_config;
cmd.len[0] = cmd_size;
cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
ret = iwl_mvm_send_cmd(mvm, &cmd);
kfree(scan_config);
return ret;
}
static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid)
{
int i;
for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
if (mvm->scan_uid[i] == uid)
return i;
return i;
}
static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm)
{
return iwl_mvm_find_scan_uid(mvm, 0);
}
static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
enum iwl_umac_scan_uid_type type)
{
int i;
for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
if (mvm->scan_uid[i] & type)
return true;
return false;
}
static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
enum iwl_umac_scan_uid_type type)
{
u32 uid;
/* make sure exactly one bit is on in scan type */
WARN_ON(hweight8(type) != 1);
/*
* Make sure scan uids are unique. If one scan lasts a long time while
* others complete frequently, the sequence number can wrap around and
* we could end up with more than one scan sharing the same uid.
*/
do {
uid = type | (mvm->scan_seq_num <<
IWL_UMAC_SCAN_UID_SEQ_OFFSET);
mvm->scan_seq_num++;
} while (iwl_mvm_find_scan_uid(mvm, uid) <
IWL_MVM_MAX_SIMULTANEOUS_SCANS);
IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid);
return uid;
}
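The UMAC scan uid packs the scan-type bit and a driver-side sequence number into one value. Assuming IWL_UMAC_SCAN_UID_SEQ_OFFSET is 2 (an assumption made only for this illustration), the fifth regular scan would get:

/* type = IWL_UMAC_SCAN_UID_REG_SCAN = BIT(0) = 0x1
 * seq  = 5
 * uid  = type | (seq << IWL_UMAC_SCAN_UID_SEQ_OFFSET)
 *      = 0x1  | (5 << 2) = 0x15
 *
 * The do/while loop above keeps bumping seq until the resulting uid is not
 * already present in mvm->scan_uid[], so a wrapped sequence number cannot
 * collide with a still-running scan.
 */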
static void
iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
struct iwl_scan_req_umac *cmd,
struct iwl_mvm_scan_params *params)
{
memset(cmd, 0, ksize(cmd));
cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
sizeof(struct iwl_mvm_umac_cmd_hdr));
cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
if (params->passive_fragmented)
cmd->fragmented_dwell =
params->dwell[IEEE80211_BAND_2GHZ].passive;
cmd->max_out_time = cpu_to_le32(params->max_out_time);
cmd->suspend_time = cpu_to_le32(params->suspend_time);
cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
}
static void
iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
struct ieee80211_channel **channels,
int n_channels, u32 ssid_bitmap,
struct iwl_scan_req_umac *cmd)
{
struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
int i;
for (i = 0; i < n_channels; i++) {
channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
channel_cfg[i].channel_num = channels[i]->hw_value;
channel_cfg[i].iter_count = 1;
channel_cfg[i].iter_interval = 0;
}
}
static u32 iwl_mvm_scan_umac_common_flags(struct iwl_mvm *mvm, int n_ssids,
struct cfg80211_ssid *ssids,
int fragmented)
{
int flags = 0;
if (n_ssids == 0)
flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
if (n_ssids == 1 && ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
if (fragmented)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
return flags;
}
int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_scan_request *req)
{
struct iwl_host_cmd hcmd = {
.id = SCAN_REQ_UMAC,
.len = { iwl_mvm_scan_size(mvm), },
.data = { mvm->scan_cmd, },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
struct iwl_mvm_scan_params params = {};
u32 uid, flags;
u32 ssid_bitmap = 0;
int ret, i, uid_idx;
lockdep_assert_held(&mvm->mutex);
uid_idx = iwl_mvm_find_free_scan_uid(mvm);
if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
return -EBUSY;
/* we should have failed registration if scan_cmd was NULL */
if (WARN_ON(mvm->scan_cmd == NULL))
return -ENOMEM;
if (WARN_ON(req->req.n_ssids > PROBE_OPTION_MAX ||
req->ies.common_ie_len +
req->ies.len[NL80211_BAND_2GHZ] +
req->ies.len[NL80211_BAND_5GHZ] + 24 + 2 >
SCAN_OFFLOAD_PROBE_REQ_SIZE || req->req.n_channels >
mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS;
iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
&params);
iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
mvm->scan_uid[uid_idx] = uid;
cmd->uid = cpu_to_le32(uid);
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
flags = iwl_mvm_scan_umac_common_flags(mvm, req->req.n_ssids,
req->req.ssids,
params.passive_fragmented);
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
cmd->general_flags = cpu_to_le32(flags);
cmd->n_channels = req->req.n_channels;
for (i = 0; i < req->req.n_ssids; i++)
ssid_bitmap |= BIT(i);
iwl_mvm_umac_scan_cfg_channels(mvm, req->req.channels,
req->req.n_channels, ssid_bitmap, cmd);
sec_part->schedule[0].iter_count = 1;
sec_part->delay = 0;
iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, &sec_part->preq,
req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
req->req.mac_addr : NULL,
req->req.mac_addr_mask);
iwl_mvm_scan_fill_ssids(sec_part->direct_scan, req->req.ssids,
req->req.n_ssids, 0);
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
IWL_DEBUG_SCAN(mvm,
"Scan request was sent successfully\n");
} else {
/*
* If the scan failed, it usually means that the FW was unable
* to allocate the time events. Warn on it, but maybe we
* should try to send the command again with different params.
*/
IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
}
return ret;
}
int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies)
{
struct iwl_host_cmd hcmd = {
.id = SCAN_REQ_UMAC,
.len = { iwl_mvm_scan_size(mvm), },
.data = { mvm->scan_cmd, },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
struct iwl_mvm_scan_params params = {};
u32 uid, flags;
u32 ssid_bitmap = 0;
int ret, uid_idx;
lockdep_assert_held(&mvm->mutex);
uid_idx = iwl_mvm_find_free_scan_uid(mvm);
if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
return -EBUSY;
/* we should have failed registration if scan_cmd was NULL */
if (WARN_ON(mvm->scan_cmd == NULL))
return -ENOMEM;
if (WARN_ON(req->n_ssids > PROBE_OPTION_MAX ||
ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
ies->len[NL80211_BAND_5GHZ] + 24 + 2 >
SCAN_OFFLOAD_PROBE_REQ_SIZE || req->n_channels >
mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS;
iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags,
&params);
iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN);
mvm->scan_uid[uid_idx] = uid;
cmd->uid = cpu_to_le32(uid);
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
flags = iwl_mvm_scan_umac_common_flags(mvm, req->n_ssids, req->ssids,
params.passive_fragmented);
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
if (iwl_mvm_scan_pass_all(mvm, req))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
else
flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
cmd->general_flags = cpu_to_le32(flags);
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
mvm->last_ebs_successful)
cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
cmd->n_channels = req->n_channels;
iwl_scan_offload_build_ssid(req, sec_part->direct_scan, &ssid_bitmap,
false);
/* This API uses bits 0-19 instead of 1-20. */
ssid_bitmap = ssid_bitmap >> 1;
iwl_mvm_umac_scan_cfg_channels(mvm, req->channels, req->n_channels,
ssid_bitmap, cmd);
sec_part->schedule[0].interval =
cpu_to_le16(req->interval / MSEC_PER_SEC);
sec_part->schedule[0].iter_count = 0xff;
sec_part->delay = 0;
iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq,
req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
req->mac_addr : NULL,
req->mac_addr_mask);
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
IWL_DEBUG_SCAN(mvm,
"Sched scan request was sent successfully\n");
} else {
/*
* If the scan failed, it usually means that the FW was unable
* to allocate the time events. Warn on it, but maybe we
* should try to send the command again with different params.
*/
IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
}
return ret;
}
int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
u32 uid = __le32_to_cpu(notif->uid);
bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN);
int uid_idx = iwl_mvm_find_scan_uid(mvm, uid);
/*
* The scan uid may already have been cleared to zero if a scan abort was requested from above.
*/
if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
return 0;
IWL_DEBUG_SCAN(mvm,
"Scan completed, uid %u type %s, status %s, EBS status %s\n",
uid, sched ? "sched" : "regular",
notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted",
notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
"success" : "failed");
mvm->last_ebs_successful = !notif->ebs_status;
mvm->scan_uid[uid_idx] = 0;
if (!sched) {
ieee80211_scan_completed(mvm->hw,
notif->status ==
IWL_SCAN_OFFLOAD_ABORTED);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
} else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) {
ieee80211_sched_scan_stopped(mvm->hw);
} else {
IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
}
return 0;
}
static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
struct iwl_umac_scan_done *scan_done = data;
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
u32 uid = __le32_to_cpu(notif->uid);
int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid);
if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC))
return false;
if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
return false;
/*
* Clear the scan uid of scans that were aborted from above and have
* completed in the FW, so the RX handler does nothing.
*/
scan_done->mvm->scan_uid[uid_idx] = 0;
return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
}
static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid)
{
struct iwl_umac_scan_abort cmd = {
.hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
sizeof(struct iwl_mvm_umac_cmd_hdr)),
.uid = cpu_to_le32(uid),
};
lockdep_assert_held(&mvm->mutex);
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
}
static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
enum iwl_umac_scan_uid_type type, bool notify)
{
struct iwl_notification_wait wait_scan_done;
static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
struct iwl_umac_scan_done scan_done = {
.mvm = mvm,
.type = type,
};
int i, ret = -EIO;
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
scan_done_notif,
ARRAY_SIZE(scan_done_notif),
iwl_scan_umac_done_check, &scan_done);
IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
if (mvm->scan_uid[i] & type) {
int err;
if (iwl_mvm_is_radio_killed(mvm) &&
(type & IWL_UMAC_SCAN_UID_REG_SCAN)) {
ieee80211_scan_completed(mvm->hw, true);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
break;
}
err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]);
if (!err)
ret = 0;
}
}
if (ret) {
IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n");
iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
return ret;
}
ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
if (ret)
return ret;
if (notify) {
if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN)
ieee80211_sched_scan_stopped(mvm->hw);
if (type & IWL_UMAC_SCAN_UID_REG_SCAN) {
ieee80211_scan_completed(mvm->hw, true);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
}
}
return ret;
}
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
return sizeof(struct iwl_scan_req_umac) +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels +
sizeof(struct iwl_scan_req_umac_tail);
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
return sizeof(struct iwl_scan_req_unified_lmac) +
sizeof(struct iwl_scan_channel_cfg_lmac) *
mvm->fw->ucode_capa.n_scan_channels +
sizeof(struct iwl_scan_probe_req);
return sizeof(struct iwl_scan_cmd) +
mvm->fw->ucode_capa.max_probe_length +
mvm->fw->ucode_capa.n_scan_channels *
sizeof(struct iwl_scan_channel);
}
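The new iwl_mvm_scan_size() keeps buffer sizing in one place: the ops.c hunk earlier in this merge allocates mvm->scan_cmd with it, and the scan request commands reuse the same figure for their length, so buffer and host command always agree regardless of which firmware scan API is active. Roughly:

/* at op-mode start (see the ops.c hunk earlier in this diff) */
scan_size = iwl_mvm_scan_size(mvm);
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);

/* later, when issuing a request (UMAC shown; the LMAC path does the same) */
struct iwl_host_cmd hcmd = {
        .id        = SCAN_REQ_UMAC,
        .len       = { iwl_mvm_scan_size(mvm), },
        .data      = { mvm->scan_cmd, },
        .dataflags = { IWL_HCMD_DFL_NOCOPY, },
};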
...@@ -204,6 +204,56 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, ...@@ -204,6 +204,56 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret; return ret;
} }
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
unsigned long used_hw_queues;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
u32 ac;
lockdep_assert_held(&mvm->mutex);
used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);
/* Find available queues, and allocate them to the ACs */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
u8 queue = find_first_zero_bit(&used_hw_queues,
mvm->first_agg_queue);
if (queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate STA queue\n");
return -EBUSY;
}
__set_bit(queue, &used_hw_queues);
mvmsta->hw_queue[ac] = queue;
}
/* Found a place for all queues - enable them */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
iwl_mvm_ac_to_tx_fifo[ac]);
mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
}
return 0;
}
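A walk-through of the per-AC queue allocation in iwl_mvm_tdls_sta_init() above, with made-up queue numbers (say queues 0-3 are already taken and mvm->first_agg_queue is 10):

/* used_hw_queues = 0x0f on entry, first_agg_queue = 10
 *
 *   ac 0 (VO): find_first_zero_bit() -> queue 4, bit 4 marked used
 *   ac 1 (VI): -> queue 5
 *   ac 2 (BE): -> queue 6
 *   ac 3 (BK): -> queue 7
 *
 * mvmsta->tfd_queue_msk ends up as 0xf0 and each queue is enabled with
 * the TX FIFO matching its AC via iwl_mvm_enable_ac_txq(); if fewer than
 * four queues below first_agg_queue had been free, the setup would have
 * failed with -EBUSY instead.
 */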
static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
unsigned long sta_msk;
int i;
lockdep_assert_held(&mvm->mutex);
/* disable the TDLS STA-specific queues */
sta_msk = mvmsta->tfd_queue_msk;
for_each_set_bit(i, &sta_msk, sizeof(sta_msk))
iwl_mvm_disable_txq(mvm, i);
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm, int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct ieee80211_sta *sta) struct ieee80211_sta *sta)
...@@ -237,9 +287,17 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, ...@@ -237,9 +287,17 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
atomic_set(&mvm->pending_frames[sta_id], 0); atomic_set(&mvm->pending_frames[sta_id], 0);
mvm_sta->tid_disable_agg = 0; mvm_sta->tid_disable_agg = 0;
mvm_sta->tfd_queue_msk = 0; mvm_sta->tfd_queue_msk = 0;
/* allocate new queues for a TDLS station */
if (sta->tdls) {
ret = iwl_mvm_tdls_sta_init(mvm, sta);
if (ret)
return ret;
} else {
for (i = 0; i < IEEE80211_NUM_ACS; i++) for (i = 0; i < IEEE80211_NUM_ACS; i++)
if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
}
/* for HW restart - reset everything but the sequence number */ /* for HW restart - reset everything but the sequence number */
for (i = 0; i < IWL_MAX_TID_COUNT; i++) { for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
...@@ -251,7 +309,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, ...@@ -251,7 +309,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
if (ret) if (ret)
return ret; goto err;
if (vif->type == NL80211_IFTYPE_STATION) { if (vif->type == NL80211_IFTYPE_STATION) {
if (!sta->tdls) { if (!sta->tdls) {
...@@ -265,6 +323,10 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, ...@@ -265,6 +323,10 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
return 0; return 0;
err:
iwl_mvm_tdls_sta_deinit(mvm, sta);
return ret;
} }
int iwl_mvm_update_sta(struct iwl_mvm *mvm, int iwl_mvm_update_sta(struct iwl_mvm *mvm,
...@@ -398,6 +460,17 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk) ...@@ -398,6 +460,17 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
} }
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
clear_bit(sta_id, mvm->sta_drained); clear_bit(sta_id, mvm->sta_drained);
if (mvm->tfd_drained[sta_id]) {
unsigned long i, msk = mvm->tfd_drained[sta_id];
for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
iwl_mvm_disable_txq(mvm, i);
mvm->tfd_drained[sta_id] = 0;
IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
sta_id, msk);
}
} }
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
...@@ -430,6 +503,15 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, ...@@ -430,6 +503,15 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
} }
/*
* This shouldn't happen - the TDLS channel switch should be canceled
* before the STA is removed.
*/
if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
cancel_delayed_work(&mvm->tdls_cs.dwork);
}
/* /*
* Make sure that the tx response code sees the station as -EBUSY and * Make sure that the tx response code sees the station as -EBUSY and
* calls the drain worker. * calls the drain worker.
...@@ -443,9 +525,22 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, ...@@ -443,9 +525,22 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
ERR_PTR(-EBUSY)); ERR_PTR(-EBUSY));
spin_unlock_bh(&mvm_sta->lock); spin_unlock_bh(&mvm_sta->lock);
/* disable TDLS sta queues on drain complete */
if (sta->tdls) {
mvm->tfd_drained[mvm_sta->sta_id] =
mvm_sta->tfd_queue_msk;
IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
mvm_sta->sta_id);
}
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
} else { } else {
spin_unlock_bh(&mvm_sta->lock); spin_unlock_bh(&mvm_sta->lock);
if (sta->tdls)
iwl_mvm_tdls_sta_deinit(mvm, sta);
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
} }
...@@ -1071,15 +1166,16 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, ...@@ -1071,15 +1166,16 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta, struct iwl_mvm_sta *mvm_sta,
struct ieee80211_key_conf *keyconf, struct ieee80211_key_conf *keyconf, bool mcast,
u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k, u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags)
u32 cmd_flags)
{ {
struct iwl_mvm_add_sta_key_cmd cmd = {}; struct iwl_mvm_add_sta_key_cmd cmd = {};
__le16 key_flags; __le16 key_flags;
int ret, status; int ret;
u32 status;
u16 keyidx; u16 keyidx;
int i; int i;
u8 sta_id = mvm_sta->sta_id;
keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK; STA_KEY_FLG_KEYID_MSK;
...@@ -1098,12 +1194,18 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, ...@@ -1098,12 +1194,18 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
memcpy(cmd.key, keyconf->key, keyconf->keylen); memcpy(cmd.key, keyconf->key, keyconf->keylen);
break; break;
case WLAN_CIPHER_SUITE_WEP104:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
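/* fall through - WEP104 and WEP40 share the key copy below */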
case WLAN_CIPHER_SUITE_WEP40:
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
break;
default: default:
key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
memcpy(cmd.key, keyconf->key, keyconf->keylen); memcpy(cmd.key, keyconf->key, keyconf->keylen);
} }
if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST); key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
cmd.key_offset = keyconf->hw_key_idx; cmd.key_offset = keyconf->hw_key_idx;
...@@ -1195,18 +1297,89 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, ...@@ -1195,18 +1297,89 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
return NULL; return NULL;
} }
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf, struct ieee80211_key_conf *keyconf,
bool have_key_offset) bool mcast)
{ {
struct iwl_mvm_sta *mvm_sta; struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
int ret; int ret;
u8 *addr, sta_id; const u8 *addr;
struct ieee80211_key_seq seq; struct ieee80211_key_seq seq;
u16 p1k[5]; u16 p1k[5];
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
/* get phase 1 key from mac80211 */
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
seq.tkip.iv32, p1k, 0);
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
0, NULL, 0);
break;
default:
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
0, NULL, 0);
}
return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
struct ieee80211_key_conf *keyconf,
bool mcast)
{
struct iwl_mvm_add_sta_key_cmd cmd = {};
__le16 key_flags;
int ret;
u32 status;
key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK);
key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
if (mcast)
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
cmd.key_flags = key_flags;
cmd.key_offset = keyconf->hw_key_idx;
cmd.sta_id = sta_id;
status = ADD_STA_SUCCESS;
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
&cmd, &status);
switch (status) {
case ADD_STA_SUCCESS:
IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
break;
default:
ret = -EIO;
IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
break;
}
return ret;
}
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf,
bool have_key_offset)
{
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
u8 sta_id;
int ret;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
/* Get the station id from the mvm local station table */ /* Get the station id from the mvm local station table */
...@@ -1234,8 +1407,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, ...@@ -1234,8 +1407,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
} }
} }
mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv; if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
if (WARN_ON_ONCE(mvm_sta->vif != vif))
return -EINVAL; return -EINVAL;
if (!have_key_offset) { if (!have_key_offset) {
...@@ -1249,26 +1421,26 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, ...@@ -1249,26 +1421,26 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
return -ENOSPC; return -ENOSPC;
} }
switch (keyconf->cipher) { ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast);
case WLAN_CIPHER_SUITE_TKIP: if (ret) {
addr = iwl_mvm_get_mac_addr(mvm, vif, sta); __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
/* get phase 1 key from mac80211 */ goto end;
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
seq.tkip.iv32, p1k, 0);
break;
case WLAN_CIPHER_SUITE_CCMP:
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
0, NULL, 0);
break;
default:
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf,
sta_id, 0, NULL, 0);
} }
if (ret) /*
* For WEP, the same key is used for multicast and unicast. Upload it
* again, using the same key offset, and now pointing the other one
* to the same key slot (offset).
* If this fails, remove the original as well.
*/
if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast);
if (ret) {
__clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
}
}
end: end:
IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
...@@ -1282,11 +1454,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, ...@@ -1282,11 +1454,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf) struct ieee80211_key_conf *keyconf)
{ {
struct iwl_mvm_sta *mvm_sta; bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
struct iwl_mvm_add_sta_key_cmd cmd = {};
__le16 key_flags;
int ret, status;
u8 sta_id; u8 sta_id;
int ret;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
...@@ -1299,8 +1469,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, ...@@ -1299,8 +1469,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
ret = __test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
if (!ret) {
IWL_ERR(mvm, "offset %d not used in fw key table.\n", IWL_ERR(mvm, "offset %d not used in fw key table.\n",
keyconf->hw_key_idx); keyconf->hw_key_idx);
return -ENOENT; return -ENOENT;
...@@ -1326,35 +1495,17 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, ...@@ -1326,35 +1495,17 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
} }
} }
mvm_sta = (struct iwl_mvm_sta *)sta->drv_priv; if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
if (WARN_ON_ONCE(mvm_sta->vif != vif))
return -EINVAL; return -EINVAL;
key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
STA_KEY_FLG_KEYID_MSK); if (ret)
key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); return ret;
key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
cmd.key_flags = key_flags;
cmd.key_offset = keyconf->hw_key_idx;
cmd.sta_id = sta_id;
status = ADD_STA_SUCCESS;
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
&cmd, &status);
switch (status) { /* delete WEP key twice to get rid of (now useless) offset */
case ADD_STA_SUCCESS: if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
break; ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
default:
ret = -EIO;
IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
break;
}
return ret; return ret;
} }
...@@ -1367,6 +1518,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, ...@@ -1367,6 +1518,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
{ {
struct iwl_mvm_sta *mvm_sta; struct iwl_mvm_sta *mvm_sta;
u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta); u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta);
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
return; return;
...@@ -1381,8 +1533,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, ...@@ -1381,8 +1533,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
} }
} }
mvm_sta = (void *)sta->drv_priv; mvm_sta = iwl_mvm_sta_from_mac80211(sta);
iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id, iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
iv32, phase1key, CMD_ASYNC); iv32, phase1key, CMD_ASYNC);
rcu_read_unlock(); rcu_read_unlock();
} }
...@@ -1580,3 +1732,18 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, ...@@ -1580,3 +1732,18 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
} }
} }
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta;
rcu_read_lock();
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
if (!WARN_ON(!mvmsta))
iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
rcu_read_unlock();
}
...@@ -264,6 +264,7 @@ enum iwl_mvm_agg_state { ...@@ -264,6 +264,7 @@ enum iwl_mvm_agg_state {
* the first packet to be sent in legacy HW queue in Tx AGG stop flow. * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
* we are ready to finish the Tx AGG stop / start flow. * we are ready to finish the Tx AGG stop / start flow.
* @tx_time: medium time consumed by this A-MPDU
*/ */
struct iwl_mvm_tid_data { struct iwl_mvm_tid_data {
u16 seq_number; u16 seq_number;
...@@ -274,6 +275,7 @@ struct iwl_mvm_tid_data { ...@@ -274,6 +275,7 @@ struct iwl_mvm_tid_data {
enum iwl_mvm_agg_state state; enum iwl_mvm_agg_state state;
u16 txq_id; u16 txq_id;
u16 ssn; u16 ssn;
u16 tx_time;
}; };
static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data) static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
...@@ -286,6 +288,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data) ...@@ -286,6 +288,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
* struct iwl_mvm_sta - representation of a station in the driver * struct iwl_mvm_sta - representation of a station in the driver
* @sta_id: the index of the station in the fw (will be replaced by id_n_color) * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
* @tfd_queue_msk: the tfd queues used by the station * @tfd_queue_msk: the tfd queues used by the station
* @hw_queue: per-AC mapping of the TFD queues used by station
* @mac_id_n_color: the MAC context this station is linked to * @mac_id_n_color: the MAC context this station is linked to
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid. * tid.
...@@ -309,6 +312,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data) ...@@ -309,6 +312,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
struct iwl_mvm_sta { struct iwl_mvm_sta {
u32 sta_id; u32 sta_id;
u32 tfd_queue_msk; u32 tfd_queue_msk;
u8 hw_queue[IEEE80211_NUM_ACS];
u32 mac_id_n_color; u32 mac_id_n_color;
u16 tid_disable_agg; u16 tid_disable_agg;
u8 max_agg_bufsize; u8 max_agg_bufsize;
...@@ -418,5 +422,6 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, ...@@ -418,5 +422,6 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif, struct iwl_mvm_vif *mvmvif,
bool disable); bool disable);
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#endif /* __sta_h__ */ #endif /* __sta_h__ */
...@@ -61,9 +61,13 @@ ...@@ -61,9 +61,13 @@
* *
*****************************************************************************/ *****************************************************************************/
#include <linux/etherdevice.h>
#include "mvm.h" #include "mvm.h"
#include "time-event.h" #include "time-event.h"
#define TU_TO_US(x) (x * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
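The helpers above encode the 802.11 time unit (1 TU = 1024 µs); TU_TO_MS truncates to whole milliseconds. A small sketch, with illustrative values and a hypothetical helper name, of the DTIM-interval delay that the channel-switch paths below compute the same way:
/*
 * Sketch only: with beacon_int = 100 TU and dtim_period = 2, this is
 * TU_TO_MS(200) = (200 * 1024) / 1000 = 204 ms, the kind of delay later
 * passed to msecs_to_jiffies() for the tdls_cs delayed work.
 */
static unsigned int example_dtim_delay_ms(struct ieee80211_vif *vif)
{
	return TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
}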
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm) void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{ {
struct ieee80211_sta *sta; struct ieee80211_sta *sta;
...@@ -113,17 +117,85 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif) ...@@ -113,17 +117,85 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return count; return count;
} }
static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_rx_packet *pkt;
struct iwl_tdls_config_res *resp;
struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
struct iwl_host_cmd cmd = {
.id = TDLS_CONFIG_CMD,
.flags = CMD_WANT_SKB,
.data = { &tdls_cfg_cmd, },
.len = { sizeof(struct iwl_tdls_config_cmd), },
};
struct ieee80211_sta *sta;
int ret, i, cnt;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
tdls_cfg_cmd.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */
/* for now the Tx cmd is empty and unused */
/* populate TDLS peer data */
cnt = 0;
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta) || !sta->tdls)
continue;
tdls_cfg_cmd.sta_info[cnt].sta_id = i;
tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
IWL_MVM_TDLS_FW_TID;
tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
tdls_cfg_cmd.sta_info[cnt].is_initiator =
cpu_to_le32(sta->tdls_initiator ? 1 : 0);
cnt++;
}
tdls_cfg_cmd.tdls_peer_count = cnt;
IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (WARN_ON_ONCE(ret))
return;
pkt = cmd.resp_pkt;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
pkt->hdr.flags);
goto exit;
}
if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
goto exit;
/* we don't really care about the response at this point */
exit:
iwl_free_resp(&cmd);
}
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool sta_added) bool sta_added)
{ {
int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif); int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
/* /* when the first peer joins, send a power update first */
* Disable ps when the first TDLS sta is added and re-enable it if (tdls_sta_cnt == 1 && sta_added)
* when the last TDLS sta is removed iwl_mvm_power_update_mac(mvm);
*/
if ((tdls_sta_cnt == 1 && sta_added) || /* configure the FW with TDLS peer info */
(tdls_sta_cnt == 0 && !sta_added)) iwl_mvm_tdls_config(mvm, vif);
/* when the last peer leaves, send a power update last */
if (tdls_sta_cnt == 0 && !sta_added)
iwl_mvm_power_update_mac(mvm); iwl_mvm_power_update_mac(mvm);
} }
...@@ -147,3 +219,488 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, ...@@ -147,3 +219,488 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS); iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
} }
static const char *
iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
switch (state) {
case IWL_MVM_TDLS_SW_IDLE:
return "IDLE";
case IWL_MVM_TDLS_SW_REQ_SENT:
return "REQ SENT";
case IWL_MVM_TDLS_SW_REQ_RCVD:
return "REQ RECEIVED";
case IWL_MVM_TDLS_SW_ACTIVE:
return "ACTIVE";
}
return NULL;
}
static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
enum iwl_mvm_tdls_cs_state state)
{
if (mvm->tdls_cs.state == state)
return;
IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
iwl_mvm_tdls_cs_state_str(state));
mvm->tdls_cs.state = state;
if (state == IWL_MVM_TDLS_SW_IDLE)
mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
}
int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
struct ieee80211_sta *sta;
unsigned int delay;
struct iwl_mvm_sta *mvmsta;
struct ieee80211_vif *vif;
u32 sta_id = le32_to_cpu(notif->sta_id);
lockdep_assert_held(&mvm->mutex);
/* can fail sometimes */
if (!le32_to_cpu(notif->status)) {
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
goto out;
}
if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
goto out;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
/* the station may not be here, but if it is, it must be a TDLS peer */
if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
goto out;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
vif = mvmsta->vif;
/*
* Update state and possibly switch again after this is over (DTIM).
* Also convert TU to msec.
*/
delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
out:
return 0;
}
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
enum iwl_tdls_channel_switch_type type,
const u8 *peer, bool peer_initiator)
{
bool same_peer = false;
int ret = 0;
/* get the existing peer if it's there */
if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
struct ieee80211_sta *sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
lockdep_is_held(&mvm->mutex));
if (!IS_ERR_OR_NULL(sta))
same_peer = ether_addr_equal(peer, sta->addr);
}
switch (mvm->tdls_cs.state) {
case IWL_MVM_TDLS_SW_IDLE:
/*
* might be spurious packet from the peer after the switch is
* already done
*/
if (type == TDLS_MOVE_CH)
ret = -EINVAL;
break;
case IWL_MVM_TDLS_SW_REQ_SENT:
/*
* We received a ch-switch request while an outgoing one is
* pending. Allow it to proceed if the other peer is the same
* one we sent to, and we are not the link initiator.
*/
if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH) {
if (!same_peer)
ret = -EBUSY;
else if (!peer_initiator) /* we are the initiator */
ret = -EBUSY;
}
break;
case IWL_MVM_TDLS_SW_REQ_RCVD:
/* as above, allow the link initiator to proceed */
if (type == TDLS_SEND_CHAN_SW_REQ) {
if (!same_peer)
ret = -EBUSY;
else if (peer_initiator) /* they are the initiator */
ret = -EBUSY;
} else if (type == TDLS_MOVE_CH) {
ret = -EINVAL;
}
break;
case IWL_MVM_TDLS_SW_ACTIVE:
/* we don't allow initiations during active channel switch */
if (type == TDLS_SEND_CHAN_SW_REQ)
ret = -EINVAL;
break;
}
if (ret)
IWL_DEBUG_TDLS(mvm,
"Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
type, mvm->tdls_cs.state, peer, same_peer,
peer_initiator);
return ret;
}
static int
iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
enum iwl_tdls_channel_switch_type type,
const u8 *peer, bool peer_initiator,
u8 oper_class,
struct cfg80211_chan_def *chandef,
u32 timestamp, u16 switch_time,
u16 switch_timeout, struct sk_buff *skb,
u32 ch_sw_tm_ie)
{
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr;
struct iwl_tdls_channel_switch_cmd cmd = {0};
int ret;
lockdep_assert_held(&mvm->mutex);
ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator);
if (ret)
return ret;
if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
ret = -EINVAL;
goto out;
}
cmd.switch_type = type;
cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
cmd.timing.switch_time = cpu_to_le32(switch_time);
cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
rcu_read_lock();
sta = ieee80211_find_sta(vif, peer);
if (!sta) {
rcu_read_unlock();
ret = -ENOENT;
goto out;
}
mvmsta = iwl_mvm_sta_from_mac80211(sta);
cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);
if (!chandef) {
if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
mvm->tdls_cs.peer.chandef.chan) {
/* actually moving to the channel */
chandef = &mvm->tdls_cs.peer.chandef;
} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
type == TDLS_MOVE_CH) {
/* we need to return to base channel */
struct ieee80211_chanctx_conf *chanctx =
rcu_dereference(vif->chanctx_conf);
if (WARN_ON_ONCE(!chanctx)) {
rcu_read_unlock();
goto out;
}
chandef = &chanctx->def;
}
}
if (chandef) {
cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
PHY_BAND_24 : PHY_BAND_5);
cmd.ci.channel = chandef->chan->hw_value;
cmd.ci.width = iwl_mvm_get_channel_width(chandef);
cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
}
/* keep quota calculation simple for now - 50% of DTIM for TDLS */
cmd.timing.max_offchan_duration =
cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
vif->bss_conf.beacon_int) / 2);
/* Switch time is the first element in the switch-timing IE. */
cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
info = IEEE80211_SKB_CB(skb);
if (info->control.hw_key)
iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);
iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
mvmsta->sta_id);
hdr = (void *)skb->data;
iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
hdr->frame_control);
rcu_read_unlock();
memcpy(cmd.frame.data, skb->data, skb->len);
ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
sizeof(cmd), &cmd);
if (ret) {
IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
ret);
goto out;
}
/* channel switch has started, update state */
if (type != TDLS_MOVE_CH) {
mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
iwl_mvm_tdls_update_cs_state(mvm,
type == TDLS_SEND_CHAN_SW_REQ ?
IWL_MVM_TDLS_SW_REQ_SENT :
IWL_MVM_TDLS_SW_REQ_RCVD);
}
out:
/* channel switch failed - we are idle */
if (ret)
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
return ret;
}
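The quota calculation above gives the TDLS peer half of every DTIM interval off-channel. A worked sketch with illustrative numbers (the helper name is hypothetical):
/*
 * Sketch only: with dtim_period = 1 and beacon_int = 100 TU, one DTIM
 * interval is TU_TO_US(100) = 102400 us, so max_offchan_duration is
 * 102400 / 2 = 51200 us per interval.
 */
static u32 example_max_offchan_us(struct ieee80211_vif *vif)
{
	return TU_TO_US(vif->bss_conf.dtim_period *
			vif->bss_conf.beacon_int) / 2;
}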
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
struct iwl_mvm *mvm;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
struct ieee80211_vif *vif;
unsigned int delay;
int ret;
mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
mutex_lock(&mvm->mutex);
/* called after an active channel switch has finished or timed-out */
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
/* station might be gone, in that case do nothing */
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT)
goto out;
sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
lockdep_is_held(&mvm->mutex));
/* the station may not be here, but if it is, it must be a TDLS peer */
if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
goto out;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
vif = mvmsta->vif;
ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
TDLS_SEND_CHAN_SW_REQ,
sta->addr,
mvm->tdls_cs.peer.initiator,
mvm->tdls_cs.peer.op_class,
&mvm->tdls_cs.peer.chandef,
0, 0, 0,
mvm->tdls_cs.peer.skb,
mvm->tdls_cs.peer.ch_sw_tm_ie);
if (ret)
IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);
/* retry after a DTIM if we failed sending now */
delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
queue_delayed_work(system_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
out:
mutex_unlock(&mvm->mutex);
}
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u8 oper_class,
struct cfg80211_chan_def *chandef,
struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvmsta;
unsigned int delay;
int ret;
mutex_lock(&mvm->mutex);
IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
sta->addr, chandef->chan->center_freq, chandef->width);
/* we only support a single peer for channel switching */
if (mvm->tdls_cs.peer.sta_id != IWL_MVM_STATION_COUNT) {
IWL_DEBUG_TDLS(mvm,
"Existing peer. Can't start switch with %pM\n",
sta->addr);
ret = -EBUSY;
goto out;
}
ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
TDLS_SEND_CHAN_SW_REQ,
sta->addr, sta->tdls_initiator,
oper_class, chandef, 0, 0, 0,
tmpl_skb, ch_sw_tm_ie);
if (ret)
goto out;
/*
* Mark the peer as "in tdls switch" for this vif. We only allow a
* single such peer per vif.
*/
mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
if (!mvm->tdls_cs.peer.skb) {
ret = -ENOMEM;
goto out;
}
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
mvm->tdls_cs.peer.chandef = *chandef;
mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
mvm->tdls_cs.peer.op_class = oper_class;
mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
/*
* Wait for 2 DTIM periods before attempting the next switch. The next
* switch will be made sooner if the current one completes before that.
*/
delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
vif->bss_conf.beacon_int);
mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
out:
mutex_unlock(&mvm->mutex);
return ret;
}
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct ieee80211_sta *cur_sta;
bool wait_for_phy = false;
mutex_lock(&mvm->mutex);
IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
/* we only support a single peer for channel switching */
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_STATION_COUNT) {
IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
goto out;
}
cur_sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
lockdep_is_held(&mvm->mutex));
/* make sure it's the same peer */
if (cur_sta != sta)
goto out;
/*
* If we're currently in a switch because of the now canceled peer,
* wait a DTIM here to make sure the phy is back on the base channel.
* We can't otherwise force it.
*/
if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
wait_for_phy = true;
mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
dev_kfree_skb(mvm->tdls_cs.peer.skb);
mvm->tdls_cs.peer.skb = NULL;
out:
mutex_unlock(&mvm->mutex);
/* make sure the phy is on the base channel */
if (wait_for_phy)
msleep(TU_TO_MS(vif->bss_conf.dtim_period *
vif->bss_conf.beacon_int));
/* flush the channel switch state */
flush_delayed_work(&mvm->tdls_cs.dwork);
IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}
void
iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_tdls_ch_sw_params *params)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
enum iwl_tdls_channel_switch_type type;
unsigned int delay;
mutex_lock(&mvm->mutex);
IWL_DEBUG_TDLS(mvm,
"Received TDLS ch switch action %d from %pM status %d\n",
params->action_code, params->sta->addr, params->status);
/*
* we got a non-zero status from a peer we were switching to - move to
* the idle state and retry again later
*/
if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
params->status != 0 &&
mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
mvm->tdls_cs.cur_sta_id != IWL_MVM_STATION_COUNT) {
struct ieee80211_sta *cur_sta;
/* make sure it's the same peer */
cur_sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
lockdep_is_held(&mvm->mutex));
if (cur_sta == params->sta) {
iwl_mvm_tdls_update_cs_state(mvm,
IWL_MVM_TDLS_SW_IDLE);
goto retry;
}
}
type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;
iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
params->sta->tdls_initiator, 0,
params->chandef, params->timestamp,
params->switch_time,
params->switch_timeout,
params->tmpl_skb,
params->ch_sw_tm_ie);
retry:
/* register a timeout in case we don't succeed in switching */
delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
1024 / 1000;
mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
mutex_unlock(&mvm->mutex);
}
...@@ -191,6 +191,35 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, ...@@ -191,6 +191,35 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
return true; return true;
} }
static void
iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data,
struct iwl_time_event_notif *notif)
{
if (!le32_to_cpu(notif->status)) {
IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
iwl_mvm_te_clear_data(mvm, te_data);
return;
}
switch (te_data->vif->type) {
case NL80211_IFTYPE_AP:
iwl_mvm_csa_noa_start(mvm);
break;
case NL80211_IFTYPE_STATION:
iwl_mvm_csa_client_absent(mvm, te_data->vif);
ieee80211_chswitch_done(te_data->vif, true);
break;
default:
/* should never happen */
WARN_ON_ONCE(1);
break;
}
/* we don't need it anymore */
iwl_mvm_te_clear_data(mvm, te_data);
}
/* /*
* Handles a FW notification for an event that is known to the driver. * Handles a FW notification for an event that is known to the driver.
* *
...@@ -252,14 +281,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, ...@@ -252,14 +281,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
iwl_mvm_ref(mvm, IWL_MVM_REF_ROC); iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
ieee80211_ready_on_channel(mvm->hw); ieee80211_ready_on_channel(mvm->hw);
} else if (te_data->vif->type == NL80211_IFTYPE_AP) { } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
if (le32_to_cpu(notif->status)) iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
iwl_mvm_csa_noa_start(mvm);
else
IWL_DEBUG_TE(mvm, "CSA NOA failed to start\n");
/* we don't need it anymore */
iwl_mvm_te_clear_data(mvm, te_data);
} }
} else { } else {
IWL_WARN(mvm, "Got TE with unknown action\n"); IWL_WARN(mvm, "Got TE with unknown action\n");
...@@ -549,18 +572,11 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, ...@@ -549,18 +572,11 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
} }
} }
/* static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
* Explicit request to remove a time event. The removal of a time event needs to struct iwl_mvm_time_event_data *te_data,
* be synchronized with the flow of a time event's end notification, which also u32 *uid)
* removes the time event from the op mode data structures.
*/
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
struct iwl_mvm_time_event_data *te_data)
{ {
struct iwl_time_event_cmd time_cmd = {}; u32 id;
u32 id, uid;
int ret;
/* /*
* It is possible that by the time we got to this point the time * It is possible that by the time we got to this point the time
...@@ -569,7 +585,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, ...@@ -569,7 +585,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
spin_lock_bh(&mvm->time_event_lock); spin_lock_bh(&mvm->time_event_lock);
/* Save time event uid before clearing its data */ /* Save time event uid before clearing its data */
uid = te_data->uid; *uid = te_data->uid;
id = te_data->id; id = te_data->id;
/* /*
...@@ -584,10 +600,59 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, ...@@ -584,10 +600,59 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
* send a removal command. * send a removal command.
*/ */
if (id == TE_MAX) { if (id == TE_MAX) {
IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", uid); IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
return; return false;
} }
return true;
}
/*
* Explicit request to remove an aux roc time event. The removal of a time
* event needs to be synchronized with the flow of a time event's end
* notification, which also removes the time event from the op mode
* data structures.
*/
static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
struct iwl_mvm_time_event_data *te_data)
{
struct iwl_hs20_roc_req aux_cmd = {};
u32 uid;
int ret;
if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
return;
aux_cmd.event_unique_id = cpu_to_le32(uid);
aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
aux_cmd.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
le32_to_cpu(aux_cmd.event_unique_id));
ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
sizeof(aux_cmd), &aux_cmd);
if (WARN_ON(ret))
return;
}
/*
* Explicit request to remove a time event. The removal of a time event needs to
* be synchronized with the flow of a time event's end notification, which also
* removes the time event from the op mode data structures.
*/
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
struct iwl_mvm_time_event_data *te_data)
{
struct iwl_time_event_cmd time_cmd = {};
u32 uid;
int ret;
if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
return;
/* When we remove a TE, the UID is to be set in the id field */ /* When we remove a TE, the UID is to be set in the id field */
time_cmd.id = cpu_to_le32(uid); time_cmd.id = cpu_to_le32(uid);
time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
...@@ -666,13 +731,17 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -666,13 +731,17 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
} }
void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm) void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
{ {
struct iwl_mvm_vif *mvmvif; struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_time_event_data *te_data; struct iwl_mvm_time_event_data *te_data;
bool is_p2p = false;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
mvmvif = NULL;
spin_lock_bh(&mvm->time_event_lock);
/* /*
* Iterate over the list of time events and find the time event that is * Iterate over the list of time events and find the time event that is
* associated with a P2P_DEVICE interface. * associated with a P2P_DEVICE interface.
...@@ -680,22 +749,41 @@ void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm) ...@@ -680,22 +749,41 @@ void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)
* event at any given time and this time event corresponds to a ROC * event at any given time and this time event corresponds to a ROC
* request * request
*/ */
mvmvif = NULL;
spin_lock_bh(&mvm->time_event_lock);
list_for_each_entry(te_data, &mvm->time_event_list, list) { list_for_each_entry(te_data, &mvm->time_event_list, list) {
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE &&
te_data->running) {
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
break; is_p2p = true;
goto remove_te;
} }
} }
/*
* Iterate over the list of aux roc time events and find the time
* event that is associated with a BSS interface.
* This assumes that a BSS interface can have only a single time
* event at any given time and this time event corresponds to a ROC
* request
*/
list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
if (te_data->running) {
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
goto remove_te;
}
}
remove_te:
spin_unlock_bh(&mvm->time_event_lock); spin_unlock_bh(&mvm->time_event_lock);
if (!mvmvif) { if (!mvmvif) {
IWL_WARN(mvm, "P2P_DEVICE no remain on channel event\n"); IWL_WARN(mvm, "No remain on channel event\n");
return; return;
} }
if (is_p2p)
iwl_mvm_remove_time_event(mvm, mvmvif, te_data); iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
else
iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
iwl_mvm_roc_finished(mvm); iwl_mvm_roc_finished(mvm);
} }
......
...@@ -182,14 +182,14 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -182,14 +182,14 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int duration, enum ieee80211_roc_type type); int duration, enum ieee80211_roc_type type);
/** /**
* iwl_mvm_stop_p2p_roc - stop remain on channel for p2p device functionlity * iwl_mvm_stop_roc - stop remain on channel functionality
* @mvm: the mvm component * @mvm: the mvm component
* *
* This function can be used to cancel an ongoing ROC session. * This function can be used to cancel an ongoing ROC session.
* The function is async, it will instruct the FW to stop serving the ROC * The function is async, it will instruct the FW to stop serving the ROC
* session, but will not wait for the actual stopping of the session. * session, but will not wait for the actual stopping of the session.
*/ */
void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm); void iwl_mvm_stop_roc(struct iwl_mvm *mvm);
/** /**
* iwl_mvm_remove_time_event - general function to clean up of time event * iwl_mvm_remove_time_event - general function to clean up of time event
......
...@@ -95,30 +95,79 @@ static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm) ...@@ -95,30 +95,79 @@ static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
iwl_mvm_set_hw_ctkill_state(mvm, false); iwl_mvm_set_hw_ctkill_state(mvm, false);
} }
static bool iwl_mvm_temp_notif(struct iwl_notif_wait_data *notif_wait, void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
struct iwl_rx_packet *pkt, void *data) {
/* ignore the notification if we are in test mode */
if (mvm->temperature_test)
return;
if (mvm->temperature == temp)
return;
mvm->temperature = temp;
iwl_mvm_tt_handler(mvm);
}
static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{ {
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
int *temp = data;
struct iwl_dts_measurement_notif *notif; struct iwl_dts_measurement_notif *notif;
int len = iwl_rx_packet_payload_len(pkt); int len = iwl_rx_packet_payload_len(pkt);
int temp;
if (WARN_ON_ONCE(len != sizeof(*notif))) { if (WARN_ON_ONCE(len != sizeof(*notif))) {
IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
return true; return -EINVAL;
} }
notif = (void *)pkt->data; notif = (void *)pkt->data;
*temp = le32_to_cpu(notif->temp); temp = le32_to_cpu(notif->temp);
/* shouldn't be negative, but since it's s32, make sure it isn't */ /* shouldn't be negative, but since it's s32, make sure it isn't */
if (WARN_ON_ONCE(*temp < 0)) if (WARN_ON_ONCE(temp < 0))
*temp = 0; temp = 0;
IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp);
return temp;
}
static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
int *temp = data;
int ret;
IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", *temp); ret = iwl_mvm_temp_notif_parse(mvm, pkt);
if (ret < 0)
return true; return true;
*temp = ret;
return true;
}
int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
int temp;
/* the notification is handled synchronously in ctkill, so skip here */
if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
return 0;
temp = iwl_mvm_temp_notif_parse(mvm, pkt);
if (temp < 0)
return 0;
iwl_mvm_tt_temp_changed(mvm, temp);
return 0;
} }
static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
...@@ -141,7 +190,7 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm) ...@@ -141,7 +190,7 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm)
iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
temp_notif, ARRAY_SIZE(temp_notif), temp_notif, ARRAY_SIZE(temp_notif),
iwl_mvm_temp_notif, &temp); iwl_mvm_temp_notif_wait, &temp);
ret = iwl_mvm_get_temp_cmd(mvm); ret = iwl_mvm_get_temp_cmd(mvm);
if (ret) { if (ret) {
......
...@@ -73,7 +73,7 @@ ...@@ -73,7 +73,7 @@
/* /*
* Sets most of the Tx cmd's fields * Sets most of the Tx cmd's fields
*/ */
static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd, struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info, u8 sta_id) struct ieee80211_tx_info *info, u8 sta_id)
{ {
...@@ -149,11 +149,9 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -149,11 +149,9 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
/* /*
* Sets the fields in the Tx cmd that are rate related * Sets the fields in the Tx cmd that are rate related
*/ */
static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, struct ieee80211_sta *sta, __le16 fc)
__le16 fc)
{ {
u32 rate_flags; u32 rate_flags;
int rate_idx; int rate_idx;
...@@ -232,7 +230,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, ...@@ -232,7 +230,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
/* /*
* Sets the fields in the Tx cmd that are crypto related * Sets the fields in the Tx cmd that are crypto related
*/ */
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info, struct ieee80211_tx_info *info,
struct iwl_tx_cmd *tx_cmd, struct iwl_tx_cmd *tx_cmd,
struct sk_buff *skb_frag) struct sk_buff *skb_frag)
...@@ -426,6 +424,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -426,6 +424,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
if (sta->tdls) {
/* default to TID 0 for non-QoS packets */
u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
}
if (is_ampdu) { if (is_ampdu) {
if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON)) if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
goto drop_unlock_sta; goto drop_unlock_sta;
...@@ -660,6 +665,12 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, ...@@ -660,6 +665,12 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl); seq_ctl = le16_to_cpu(hdr->seq_ctrl);
} }
/*
* TODO: this is not accurate if we are freeing more than one
* packet.
*/
info->status.tx_time =
le16_to_cpu(tx_resp->wireless_media_time);
BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
info->status.status_driver_data[0] = info->status.status_driver_data[0] =
(void *)(uintptr_t)tx_resp->reduced_tpc; (void *)(uintptr_t)tx_resp->reduced_tpc;
...@@ -852,6 +863,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, ...@@ -852,6 +863,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
mvmsta->tid_data[tid].rate_n_flags = mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate); le32_to_cpu(tx_resp->initial_rate);
mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc; mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
mvmsta->tid_data[tid].tx_time =
le16_to_cpu(tx_resp->wireless_media_time);
} }
rcu_read_unlock(); rcu_read_unlock();
...@@ -880,6 +893,8 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, ...@@ -880,6 +893,8 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
info->status.ampdu_len = ba_notif->txed; info->status.ampdu_len = ba_notif->txed;
iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags, iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
info); info);
/* TODO: not accounted if the whole A-MPDU failed */
info->status.tx_time = tid_data->tx_time;
info->status.status_driver_data[0] = info->status.status_driver_data[0] =
(void *)(uintptr_t)tid_data->reduced_tpc; (void *)(uintptr_t)tid_data->reduced_tpc;
} }
......
...@@ -499,6 +499,7 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {} ...@@ -499,6 +499,7 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{ {
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
struct iwl_trans *iwl_trans; struct iwl_trans *iwl_trans;
struct iwl_trans_pcie *trans_pcie; struct iwl_trans_pcie *trans_pcie;
int ret; int ret;
...@@ -507,6 +508,25 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -507,6 +508,25 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(iwl_trans)) if (IS_ERR(iwl_trans))
return PTR_ERR(iwl_trans); return PTR_ERR(iwl_trans);
#if IS_ENABLED(CONFIG_IWLMVM)
/*
* special-case 7265D, it has the same PCI IDs.
*
* Note that because we already pass the cfg to the transport above,
* all the parameters that the transport uses must, until that is
* changed, be identical to the ones in the 7265D configuration.
*/
if (cfg == &iwl7265_2ac_cfg)
cfg_7265d = &iwl7265d_2ac_cfg;
else if (cfg == &iwl7265_2n_cfg)
cfg_7265d = &iwl7265d_2n_cfg;
else if (cfg == &iwl7265_n_cfg)
cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d &&
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
cfg = cfg_7265d;
#endif
pci_set_drvdata(pdev, iwl_trans); pci_set_drvdata(pdev, iwl_trans);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
......
...@@ -79,6 +79,10 @@ ...@@ -79,6 +79,10 @@
#include "iwl-fw-error-dump.h" #include "iwl-fw-error-dump.h"
#include "internal.h" #include "internal.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START 0x40000
#define IWL_FW_MEM_EXTENDED_END 0x57FFF
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
...@@ -512,6 +516,9 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) ...@@ -512,6 +516,9 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
HW_READY_TIMEOUT); HW_READY_TIMEOUT);
if (ret >= 0)
iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
return ret; return ret;
} }
...@@ -624,14 +631,28 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, ...@@ -624,14 +631,28 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
} }
for (offset = 0; offset < section->len; offset += chunk_sz) { for (offset = 0; offset < section->len; offset += chunk_sz) {
u32 copy_size; u32 copy_size, dst_addr;
bool extended_addr = false;
copy_size = min_t(u32, chunk_sz, section->len - offset); copy_size = min_t(u32, chunk_sz, section->len - offset);
dst_addr = section->offset + offset;
if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
dst_addr <= IWL_FW_MEM_EXTENDED_END)
extended_addr = true;
if (extended_addr)
iwl_set_bits_prph(trans, LMPM_CHICK,
LMPM_CHICK_EXTENDED_ADDR_SPACE);
memcpy(v_addr, (u8 *)section->data + offset, copy_size); memcpy(v_addr, (u8 *)section->data + offset, copy_size);
ret = iwl_pcie_load_firmware_chunk(trans, ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
section->offset + offset, copy_size);
p_addr, copy_size);
if (extended_addr)
iwl_clear_bits_prph(trans, LMPM_CHICK,
LMPM_CHICK_EXTENDED_ADDR_SPACE);
if (ret) { if (ret) {
IWL_ERR(trans, IWL_ERR(trans,
"Could not load the [%d] uCode section\n", "Could not load the [%d] uCode section\n",
...@@ -939,7 +960,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) ...@@ -939,7 +960,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock); spin_unlock(&trans_pcie->irq_lock);
/* stop and reset the on-board processor */ /* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(20);
/* clear all status bits */ /* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
...@@ -1031,6 +1053,9 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, ...@@ -1031,6 +1053,9 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL, ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
...@@ -1233,6 +1258,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, ...@@ -1233,6 +1258,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
/* this bit wakes up the NIC */ /* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
/* /*
* These bits say the device is running, and should keep running for * These bits say the device is running, and should keep running for
......
...@@ -1431,6 +1431,9 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, ...@@ -1431,6 +1431,9 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
trans_pcie->cmd_in_flight = true; trans_pcie->cmd_in_flight = true;
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL, ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
......