Commit 739596b0 authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2016-03-02' of https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

* add support for thermal device / cooling device (Chaya Rachel)
* fixes for 9000 devices data path (Sara Sharon)
* improvements in scheduled scan w/o profiles (Luca)
* new firmware support (-21.ucode)
* add MSIX support for 9000 devices (Haim Dreyfuss)
* cleanup in PCIe initialization
* enable MU-MIMO and take care of firmware restart (Sara Sharon)
        ===> This needs mac80211-next
* add support for large SKBs in mvm to reach A-MSDU
        ===> This needs mac80211-next
* add support for filtering frames from a BA session (Sara Sharon)
        ===> This needs mac80211-next
* start implementing the new Rx path for 9000 devices (Sara Sharon)
* enable the new RRM feature flag (Beni Lev)
* fix U-APSD enablement on P2P Client (Avri Altman)
* fix beacon abort enablement (Avri Altman)
* forbid beacon storing with WoWLAN (Matti Gottlieb)
* support unified uSniffer / regular firmware image (Golan Ben-Ami)
* fix a race between debugfs hooks and iface up (Chaya Rachel Ivgi)
* fixes for runtime PM (Luca)
* add a new module parameter to disable VHT (Andrei Otcheretianski)
* build infrastructure for Dynamic Queue Allocation (Liad Kaufman)
parents 0ea6f0c5 53f09e74
@@ -73,8 +73,8 @@
 /* Highest firmware API version supported */
 #define IWL7260_UCODE_API_MAX	17
 #define IWL7265_UCODE_API_MAX	17
-#define IWL7265D_UCODE_API_MAX	20
-#define IWL3168_UCODE_API_MAX	20
+#define IWL7265D_UCODE_API_MAX	21
+#define IWL3168_UCODE_API_MAX	21

 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK	13
...
@@ -70,8 +70,8 @@
 #include "iwl-agn-hw.h"

 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX	20
-#define IWL8265_UCODE_API_MAX	20
+#define IWL8000_UCODE_API_MAX	21
+#define IWL8265_UCODE_API_MAX	21

 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK	13
@@ -217,6 +217,7 @@ const struct iwl_cfg iwl8265_2ac_cfg = {
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
 	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.vht_mu_mimo_supported = true,
 };

 const struct iwl_cfg iwl4165_2ac_cfg = {
...
@@ -55,7 +55,7 @@
 #include "iwl-agn-hw.h"

 /* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX	20
+#define IWL9000_UCODE_API_MAX	21

 /* Oldest version we won't warn about */
 #define IWL9000_UCODE_API_OK	13
@@ -139,7 +139,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 	.smem_len = IWL9000_SMEM_LEN,				\
 	.thermal_params = &iwl9000_tt_params,			\
 	.apmg_not_supported = true,				\
-	.mq_rx_supported = true
+	.mq_rx_supported = true,				\
+	.vht_mu_mimo_supported = true

 const struct iwl_cfg iwl9260_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 9260",
...
@@ -312,6 +312,7 @@ struct iwl_pwr_tx_backoff {
  * @smem_offset: offset from which the SMEM begins
  * @smem_len: the length of SMEM
  * @mq_rx_supported: multi-queue rx support
+ * @vht_mu_mimo_supported: VHT MU-MIMO support
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -364,6 +365,7 @@ struct iwl_cfg {
 	const struct iwl_tt_params *thermal_params;
 	bool apmg_not_supported;
 	bool mq_rx_supported;
+	bool vht_mu_mimo_supported;
 };

 /*
...
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -549,4 +550,52 @@ enum dtd_diode_reg {
 	DTS_DIODE_REG_FLAGS_PASS_ONCE	= 0x00000080, /* bits [7:7] */
 };

+/*****************************************************************************
+ *                      MSIX related registers                              *
+ *****************************************************************************/
+
+#define CSR_MSIX_BASE			(0x2000)
+#define CSR_MSIX_FH_INT_CAUSES_AD	(CSR_MSIX_BASE + 0x800)
+#define CSR_MSIX_FH_INT_MASK_AD		(CSR_MSIX_BASE + 0x804)
+#define CSR_MSIX_HW_INT_CAUSES_AD	(CSR_MSIX_BASE + 0x808)
+#define CSR_MSIX_HW_INT_MASK_AD		(CSR_MSIX_BASE + 0x80C)
+#define CSR_MSIX_AUTOMASK_ST_AD		(CSR_MSIX_BASE + 0x810)
+#define CSR_MSIX_RX_IVAR_AD_REG		(CSR_MSIX_BASE + 0x880)
+#define CSR_MSIX_IVAR_AD_REG		(CSR_MSIX_BASE + 0x890)
+#define CSR_MSIX_PENDING_PBA_AD		(CSR_MSIX_BASE + 0x1000)
+#define CSR_MSIX_RX_IVAR(cause)		(CSR_MSIX_RX_IVAR_AD_REG + (cause))
+#define CSR_MSIX_IVAR(cause)		(CSR_MSIX_IVAR_AD_REG + (cause))
+
+#define MSIX_FH_INT_CAUSES_Q(q)		(q)
+
+/*
+ * Causes for the FH register interrupts
+ */
+enum msix_fh_int_causes {
+	MSIX_FH_INT_CAUSES_D2S_CH0_NUM	= BIT(16),
+	MSIX_FH_INT_CAUSES_D2S_CH1_NUM	= BIT(17),
+	MSIX_FH_INT_CAUSES_S2D		= BIT(19),
+	MSIX_FH_INT_CAUSES_FH_ERR	= BIT(21),
+};
+
+/*
+ * Causes for the HW register interrupts
+ */
+enum msix_hw_int_causes {
+	MSIX_HW_INT_CAUSES_REG_ALIVE	= BIT(0),
+	MSIX_HW_INT_CAUSES_REG_WAKEUP	= BIT(1),
+	MSIX_HW_INT_CAUSES_REG_CT_KILL	= BIT(6),
+	MSIX_HW_INT_CAUSES_REG_RF_KILL	= BIT(7),
+	MSIX_HW_INT_CAUSES_REG_PERIODIC	= BIT(8),
+	MSIX_HW_INT_CAUSES_REG_SW_ERR	= BIT(25),
+	MSIX_HW_INT_CAUSES_REG_SCD	= BIT(26),
+	MSIX_HW_INT_CAUSES_REG_FH_TX	= BIT(27),
+	MSIX_HW_INT_CAUSES_REG_HW_ERR	= BIT(29),
+	MSIX_HW_INT_CAUSES_REG_HAP	= BIT(30),
+};
+
+#define MSIX_MIN_INTERRUPT_VECTORS	2
+#define MSIX_AUTO_CLEAR_CAUSE		0
+#define MSIX_NON_AUTO_CLEAR_CAUSE	BIT(7)
+
 #endif /* !__iwl_csr_h__ */
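A rough sketch of how these MSIX definitions fit together (illustrative only, not code from this commit; iwl_write8()/iwl_read32()/iwl_write32() are the existing iwl-io.h accessors, and the IVAR entry and vector number below are hypothetical):

	/* Hypothetical sketch, not part of this commit */
	static void msix_example(struct iwl_trans *trans)
	{
		u32 hw_causes;

		/* route the cause at IVAR entry 0 to MSI-X vector 1; the
		 * non-auto-clear flag means the ISR must ack it itself */
		iwl_write8(trans, CSR_MSIX_IVAR(0),
			   1 | MSIX_NON_AUTO_CLEAR_CAUSE);

		/* in the ISR: read pending HW causes, write-1-to-clear */
		hw_causes = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
		if (hw_causes & MSIX_HW_INT_CAUSES_REG_ALIVE)
			iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
				    MSIX_HW_INT_CAUSES_REG_ALIVE);
	}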
@@ -73,12 +73,12 @@ TRACE_EVENT(iwlwifi_dev_rx,
 	TP_ARGS(dev, trans, pkt, len),
 	TP_STRUCT__entry(
 		DEV_ENTRY
-		__field(u8, cmd)
+		__field(u16, cmd)
 		__dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
 	),
 	TP_fast_assign(
 		DEV_ASSIGN;
-		__entry->cmd = pkt->hdr.cmd;
+		__entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
 		memcpy(__get_dynamic_array(rxbuf), pkt,
 		       iwl_rx_trace_len(trans, pkt, len));
 	),
...
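The trace field widens from u8 to u16 because firmware commands are now addressed by a (group, opcode) pair. WIDE_ID() is the existing helper in iwl-trans.h that packs the two; roughly:

	/* from iwl-trans.h, shown here for reference */
	#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)

	/* e.g. the MU group notification added later in this merge */
	u16 cmd = WIDE_ID(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF); /* 0x05FE */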
@@ -1033,7 +1033,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 		}
 	}

-	if (usniffer_req && !*usniffer_images) {
+	if (!fw_has_capa(capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED) &&
+	    usniffer_req && !*usniffer_images) {
 		IWL_ERR(drv,
 			"user selected to work with usniffer but usniffer image isn't available in ucode package\n");
 		return -EINVAL;
@@ -1718,3 +1719,7 @@ MODULE_PARM_DESC(fw_monitor,
 module_param_named(d0i3_timeout, iwlwifi_mod_params.d0i3_entry_delay,
 		   uint, S_IRUGO);
 MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)");
+
+module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool,
+		   S_IRUGO);
+MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities");
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -371,6 +371,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RFH_RXF_DMA_MIN_RB_SIZE_MASK	(0x03000000) /* bit 24-25 */
 #define RFH_RXF_DMA_MIN_RB_SIZE_POS	24
 #define RFH_RXF_DMA_MIN_RB_4_8		(3 << RFH_RXF_DMA_MIN_RB_SIZE_POS)
+#define RFH_RXF_DMA_DROP_TOO_LARGE_MASK	(0x04000000) /* bit 26 */
 #define RFH_RXF_DMA_SINGLE_FRAME_MASK	(0x20000000) /* bit 29 */
 #define RFH_DMA_EN_MASK			(0xC0000000) /* bits 30-31*/
 #define RFH_DMA_EN_ENABLE_VAL		BIT(31)
@@ -378,10 +379,13 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RFH_RXF_RXQ_ACTIVE 0xA0980C

 #define RFH_GEN_CFG	0xA09800
-#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00
 #define RFH_GEN_CFG_SERVICE_DMA_SNOOP	BIT(0)
 #define RFH_GEN_CFG_RFH_DMA_SNOOP	BIT(1)
-#define DEFAULT_RXQ_NUM	8
+#define RFH_GEN_CFG_RB_CHUNK_SIZE	BIT(4) /* 0 - 64B, 1- 128B */
+#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00
+#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS 8
+
+#define DEFAULT_RXQ_NUM 0

 /* end of 9000 rx series registers */
...
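A hedged sketch of how the reworked RFH_GEN_CFG fields compose (this commit only adds the definitions; the value below is illustrative):

	/* Hypothetical sketch, not part of this commit */
	u32 rfh_gen_cfg = RFH_GEN_CFG_RFH_DMA_SNOOP |
			  RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			  RFH_GEN_CFG_RB_CHUNK_SIZE | /* set: 128B chunks */
			  /* queue receiving frames that match no RSS rule */
			  ((DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) &
			   RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK);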
@@ -318,6 +318,12 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
 * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
 *	from AP and will send it upon d0i3 exit.
 * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
+ * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
+ * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
+ *	thresholds reporting
+ * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
+ * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
+ *	regular image.
 *
 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
 */
@@ -351,6 +357,10 @@ enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION		= (__force iwl_ucode_tlv_capa_t)71,
 	IWL_UCODE_TLV_CAPA_BEACON_STORING		= (__force iwl_ucode_tlv_capa_t)72,
 	IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2		= (__force iwl_ucode_tlv_capa_t)73,
+	IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW		= (__force iwl_ucode_tlv_capa_t)74,
+	IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT	= (__force iwl_ucode_tlv_capa_t)75,
+	IWL_UCODE_TLV_CAPA_CTDP_SUPPORT			= (__force iwl_ucode_tlv_capa_t)76,
+	IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED		= (__force iwl_ucode_tlv_capa_t)77,

 	NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__
...
@@ -115,6 +115,7 @@ enum iwl_amsdu_size {
 *	entering D0i3 (in msecs)
 * @lar_disable: disable LAR (regulatory), default = 0
 * @fw_monitor: allow to use firmware monitor
+ * @disable_11ac: disable VHT capabilities, default = false.
 */
 struct iwl_mod_params {
 	int sw_crypto;
@@ -135,6 +136,7 @@ struct iwl_mod_params {
 	unsigned int d0i3_entry_delay;
 	bool lar_disable;
 	bool fw_monitor;
+	bool disable_11ac;
 };

 #endif /* #__iwl_modparams_h__ */
@@ -366,6 +366,9 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
 		       max_ampdu_exponent <<
 		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;

+	if (cfg->vht_mu_mimo_supported)
+		vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
 	if (cfg->ht_params->ldpc)
 		vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
@@ -449,7 +452,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
 			     IEEE80211_BAND_5GHZ);
 	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
 			     tx_chains, rx_chains);
-	if (data->sku_cap_11ac_enable)
+	if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
 		iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
 				      tx_chains, rx_chains);
...
@@ -404,4 +404,6 @@ enum {
 	LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
 };

+#define UREG_CHICK		(0xA05C00)
+#define UREG_CHICK_MSIX_ENABLE	BIT(25)
+
 #endif /* __iwl_prph_h__ */
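Presumably the transport flips this chicken bit before requesting MSI-X vectors; a minimal sketch using the existing iwl_write_prph() accessor (illustrative, not code from this commit):

	/* Hypothetical sketch, not part of this commit */
	iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);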
@@ -836,6 +836,7 @@ struct iwl_trans {

 	enum iwl_plat_pm_mode system_pm_mode;
 	enum iwl_plat_pm_mode runtime_pm_mode;
+	bool suspending;

 	/* pointer to trans specific struct */
 	/*Ensure that this pointer will always be aligned to sizeof pointer */
...
@@ -816,8 +816,7 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
 {
 	iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);

-	iwl_trans_stop_device(mvm->trans);
+	iwl_mvm_stop_device(mvm);

 	/*
 	 * Set the HW restart bit -- this is mostly true as we're
 	 * going to load new firmware and reprogram that, though
@@ -856,8 +855,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
 	wowlan_config_cmd->is_11n_connection =
 					ap_sta->ht_cap.ht_supported;
 	wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
-		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING |
-		ENABLE_STORE_BEACON;
+		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;

 	/* Query the last used seqno and set it */
 	ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
...
@@ -64,6 +64,7 @@
 *
 *****************************************************************************/
 #include <linux/vmalloc.h>
+#include <linux/ieee80211.h>

 #include "mvm.h"
 #include "fw-dbg.h"
@@ -976,7 +977,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
 		memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
 		       ARRAY_SIZE(cmd.indirection_table) % nbytes);

-	memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key));
+	memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));

 	mutex_lock(&mvm->mutex);
 	ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
@@ -1080,6 +1081,22 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
 	return count;
 }

+static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm *mvm,
+					     char *buf, size_t count,
+					     loff_t *ppos)
+{
+	unsigned int max_amsdu_len;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &max_amsdu_len);
+
+	if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454)
+		return -EINVAL;
+	mvm->max_amsdu_len = max_amsdu_len;
+	return count;
+}
+
 #define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
@@ -1497,7 +1514,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
 MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
-MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, 16);
+MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
+			   (IWL_RSS_INDIRECTION_TABLE_SIZE * 2));

 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1540,6 +1559,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 	MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
+	MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR);
...
@@ -391,4 +391,56 @@ struct iwl_rss_config_cmd {
 	u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
 } __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */

+#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128
+#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0
+#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf
+
+/**
+ * struct iwl_rxq_sync_cmd - RXQ notification trigger
+ *
+ * @flags: flags of the notification. bit 0:3 are the sender queue
+ * @rxq_mask: rx queues to send the notification on
+ * @count: number of bytes in payload, should be DWORD aligned
+ * @payload: data to send to rx queues
+ */
+struct iwl_rxq_sync_cmd {
+	__le32 flags;
+	__le32 rxq_mask;
+	__le32 count;
+	u8 payload[];
+} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rxq_sync_notification - Notification triggered by RXQ
+ * sync command
+ *
+ * @count: number of bytes in payload
+ * @payload: data to send to rx queues
+ */
+struct iwl_rxq_sync_notification {
+	__le32 count;
+	u8 payload[];
+} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
+
+/**
+ * Internal message identifier
+ *
+ * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ */
+enum iwl_mvm_rxq_notif_type {
+	IWL_MVM_RXQ_NOTIF_DEL_BA,
+};
+
+/**
+ * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
+ * in &iwl_rxq_sync_cmd. Should be DWORD aligned.
+ *
+ * @type: value from &iwl_mvm_rxq_notif_type
+ * @data: payload
+ */
+struct iwl_mvm_internal_rxq_notif {
+	u32 type;
+	u8 data[];
+} __packed;
+
 #endif /* __fw_api_rx_h__ */
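A sketch of the intended round trip, assuming iwl_mvm_notify_rx_queue() as declared later in this merge (the all-queues mask and function name example_sync_del_ba are illustrative):

	/* Hypothetical sketch, not part of this commit */
	static int example_sync_del_ba(struct iwl_mvm *mvm)
	{
		struct iwl_mvm_internal_rxq_notif notif = {
			.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		};

		/* the driver echoes this payload back to itself as an
		 * RX_QUEUES_NOTIFICATION on every queue in the mask */
		return iwl_mvm_notify_rx_queue(mvm,
					       BIT(mvm->trans->num_rx_queues) - 1,
					       (const u8 *)&notif,
					       sizeof(notif));
	}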
@@ -119,6 +119,8 @@ enum {
 	SCAN_ABORT_UMAC = 0xe,
 	SCAN_COMPLETE_UMAC = 0xf,

+	BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13,
+
 	/* station table */
 	ADD_STA_KEY = 0x17,
 	ADD_STA = 0x18,
@@ -279,9 +281,19 @@ enum {
 */
 enum iwl_phy_ops_subcmd_ids {
 	CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
+	CTDP_CONFIG_CMD = 0x03,
+	TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
+	CT_KILL_NOTIFICATION = 0xFE,
 	DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
 };

+enum iwl_data_path_subcmd_ids {
+	UPDATE_MU_GROUPS_CMD = 0x1,
+	TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
+	MU_GROUP_MGMT_NOTIF = 0xFE,
+	RX_QUEUES_NOTIFICATION = 0xFF,
+};
+
 enum iwl_prot_offload_subcmd_ids {
 	STORED_BEACON_NTF = 0xFF,
 };
@@ -291,6 +303,7 @@ enum {
 	LEGACY_GROUP = 0x0,
 	LONG_GROUP = 0x1,
 	PHY_OPS_GROUP = 0x4,
+	DATA_PATH_GROUP = 0x5,
 	PROT_OFFLOAD_GROUP = 0xb,
 };
@@ -1278,6 +1291,26 @@ struct iwl_fw_bcast_filter {
 	struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
 } __packed; /* BCAST_FILTER_S_VER_1 */

+#define BA_WINDOW_STREAMS_MAX		16
+#define BA_WINDOW_STATUS_TID_MSK	0x000F
+#define BA_WINDOW_STATUS_STA_ID_POS	4
+#define BA_WINDOW_STATUS_STA_ID_MSK	0x01F0
+#define BA_WINDOW_STATUS_VALID_MSK	BIT(9)
+
+/**
+ * struct iwl_ba_window_status_notif - reordering window's status notification
+ * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63]
+ * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid
+ * @start_seq_num: the start sequence number of the bitmap
+ * @mpdu_rx_count: the number of received MPDUs since entering D0i3
+ */
+struct iwl_ba_window_status_notif {
+	__le64 bitmap[BA_WINDOW_STREAMS_MAX];
+	__le16 ra_tid[BA_WINDOW_STREAMS_MAX];
+	__le32 start_seq_num[BA_WINDOW_STREAMS_MAX];
+	__le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
+} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
+
 /**
 * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
 * @default_discard: default action for this mac (discard (1) / pass (0)).
@@ -1675,15 +1708,77 @@ struct iwl_ext_dts_measurement_cmd {
 } __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */

 /**
- * iwl_dts_measurement_notif - notification received with the measurements
+ * struct iwl_dts_measurement_notif_v1 - measurements notification
 *
 * @temp: the measured temperature
 * @voltage: the measured voltage
 */
-struct iwl_dts_measurement_notif {
+struct iwl_dts_measurement_notif_v1 {
 	__le32 temp;
 	__le32 voltage;
-} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/
+
+/**
+ * struct iwl_dts_measurement_notif_v2 - measurements notification
+ *
+ * @temp: the measured temperature
+ * @voltage: the measured voltage
+ * @threshold_idx: the trip index that was crossed
+ */
+struct iwl_dts_measurement_notif_v2 {
+	__le32 temp;
+	__le32 voltage;
+	__le32 threshold_idx;
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */
+
+/**
+ * struct ct_kill_notif - CT-kill entry notification
+ *
+ * @temperature: the current temperature in celsius
+ * @reserved: reserved
+ */
+struct ct_kill_notif {
+	__le16 temperature;
+	__le16 reserved;
+} __packed; /* GRP_PHY_CT_KILL_NTF */
+
+/**
+ * enum iwl_mvm_ctdp_cmd_operation - CTDP command operations
+ * @CTDP_CMD_OPERATION_START: update the current budget
+ * @CTDP_CMD_OPERATION_STOP: stop ctdp
+ * @CTDP_CMD_OPERATION_REPORT: get the average budget
+ */
+enum iwl_mvm_ctdp_cmd_operation {
+	CTDP_CMD_OPERATION_START	= 0x1,
+	CTDP_CMD_OPERATION_STOP		= 0x2,
+	CTDP_CMD_OPERATION_REPORT	= 0x4,
+}; /* CTDP_CMD_OPERATION_TYPE_E */
+
+/**
+ * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget
+ *
+ * @operation: see &enum iwl_mvm_ctdp_cmd_operation
+ * @budget: the budget in milliwatt
+ * @window_size: defined in API but not used
+ */
+struct iwl_mvm_ctdp_cmd {
+	__le32 operation;
+	__le32 budget;
+	__le32 window_size;
+} __packed;
+
+#define IWL_MAX_DTS_TRIPS	8
+
+/**
+ * struct temp_report_ths_cmd - set temperature thresholds
+ *
+ * @num_temps: number of temperature thresholds passed
+ * @thresholds: array with the thresholds to be configured
+ */
+struct temp_report_ths_cmd {
+	__le32 num_temps;
+	__le16 thresholds[IWL_MAX_DTS_TRIPS];
+} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */

 /***********************************
 * TDLS API
@@ -1858,6 +1953,31 @@ struct iwl_shared_mem_cfg {
 	__le32 page_buff_size;
 } __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */

+/**
+ * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration
+ *
+ * @membership_status: a bitmap of MU groups
+ * @user_position: the position of station in a group. If the station is in the
+ *	group then bits (group * 2) is the position -1
+ */
+struct iwl_mu_group_mgmt_cmd {
+	__le32 reserved;
+	__le32 membership_status[2];
+	__le32 user_position[4];
+} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */
+
+/**
+ * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification
+ *
+ * @membership_status: a bitmap of MU groups
+ * @user_position: the position of station in a group. If the station is in the
+ *	group then bits (group * 2) is the position -1
+ */
+struct iwl_mu_group_mgmt_notif {
+	__le32 membership_status[2];
+	__le32 user_position[4];
+} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */
+
 #define MAX_STORED_BEACON_SIZE 600

 /**
...
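Decoding the packed ra_tid field per the BA window masks above (a hedged sketch, not code from this commit; the helper name is hypothetical):

	/* Hypothetical sketch, not part of this commit */
	static void ba_window_decode_example(struct iwl_ba_window_status_notif *notif)
	{
		u16 ra_tid;
		u8 tid, sta_id;
		int i;

		for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
			ra_tid = le16_to_cpu(notif->ra_tid[i]);
			if (!(ra_tid & BA_WINDOW_STATUS_VALID_MSK))
				continue; /* stream entry not in use */
			tid = ra_tid & BA_WINDOW_STATUS_TID_MSK;
			sta_id = (ra_tid & BA_WINDOW_STATUS_STA_ID_MSK) >>
				 BA_WINDOW_STATUS_STA_ID_POS;
			/* bit N of notif->bitmap[i] marks start_seq_num + N */
		}
	}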
@@ -121,12 +121,12 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
 	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
 		cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
-	memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key));
+	memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));

 	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
 }

-static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+void iwl_free_fw_paging(struct iwl_mvm *mvm)
 {
 	int i;
@@ -146,6 +146,8 @@ static void iwl_free_fw_paging(struct iwl_mvm *mvm)
 			get_order(mvm->fw_paging_db[i].fw_paging_size));
 	}
 	kfree(mvm->trans->paging_download_buf);
+	mvm->trans->paging_download_buf = NULL;
+
 	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
 }
@@ -537,7 +539,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	struct iwl_sf_region st_fwrd_space;

 	if (ucode_type == IWL_UCODE_REGULAR &&
-	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
+	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
+	    !(fw_has_capa(&mvm->fw->ucode_capa,
+			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
 		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
 	else
 		fw = iwl_get_ucode_image(mvm, ucode_type);
@@ -952,8 +956,26 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 		goto error;
 	}

+#ifdef CONFIG_THERMAL
+	if (iwl_mvm_is_tt_in_fw(mvm)) {
+		/* in order to give the responsibility of ct-kill and
+		 * TX backoff to FW we need to send empty temperature reporting
+		 * cmd during init time
+		 */
+		iwl_mvm_send_temp_report_ths_cmd(mvm);
+	} else {
+		/* Initialize tx backoffs to the minimal possible */
+		iwl_mvm_tt_tx_backoff(mvm, 0);
+	}
+
+	/* TODO: read the budget from BIOS / Platform NVM */
+	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
+		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+					   mvm->cooling_dev.cur_state);
+#else
 	/* Initialize tx backoffs to the minimal possible */
 	iwl_mvm_tt_tx_backoff(mvm, 0);
+#endif

 	WARN_ON(iwl_mvm_config_ltr(mvm));
@@ -989,7 +1011,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
 	return 0;
  error:
-	iwl_trans_stop_device(mvm->trans);
+	iwl_mvm_stop_device(mvm);
 	return ret;
 }
@@ -1033,7 +1055,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
 	return 0;
  error:
-	iwl_trans_stop_device(mvm->trans);
+	iwl_mvm_stop_device(mvm);
 	return ret;
 }
...
@@ -1484,6 +1484,8 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
 	/* update rx_status according to the notification's metadata */
 	memset(&rx_status, 0, sizeof(rx_status));
 	rx_status.mactime = le64_to_cpu(sb->tsf);
+	/* TSF as indicated by the firmware is at INA time */
+	rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
 	rx_status.device_timestamp = le32_to_cpu(sb->system_time);
 	rx_status.band =
 		(sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
...
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -69,7 +70,6 @@
 #include <linux/etherdevice.h>
 #include <linux/ip.h>
 #include <linux/if_arp.h>
-#include <linux/devcoredump.h>
 #include <linux/time.h>
 #include <net/mac80211.h>
 #include <net/ieee80211_radiotap.h>
@@ -85,7 +85,6 @@
 #include "testmode.h"
 #include "iwl-fw-error-dump.h"
 #include "iwl-prph.h"
-#include "iwl-csr.h"
 #include "iwl-nvm-parse.h"
 #include "fw-dbg.h"
@@ -611,6 +610,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
 		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;

+	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_RRM);
+
 	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;

 #ifdef CONFIG_PM_SLEEP
@@ -847,6 +848,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
 	u16 tid = params->tid;
 	u16 *ssn = &params->ssn;
 	u8 buf_size = params->buf_size;
+	bool amsdu = params->amsdu;

 	IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
 		     sta->addr, tid, action);
@@ -907,7 +909,8 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
 		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
 		break;
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
-		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
+		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
+					      buf_size, amsdu);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -969,7 +972,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 	 */
 	iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);

-	iwl_trans_stop_device(mvm->trans);
+	iwl_mvm_stop_device(mvm);

 	mvm->scan_status = 0;
 	mvm->ps_disabled = false;
@@ -1138,7 +1141,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 	 */
 	flush_work(&mvm->roc_done_wk);

-	iwl_trans_stop_device(mvm->trans);
+	iwl_mvm_stop_device(mvm);

 	iwl_mvm_async_handlers_purge(mvm);
 	/* async_handlers_list is empty and will stay empty: HW is stopped */
@@ -1169,8 +1172,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 			mvm->scan_uid_status[i] = 0;
 		}
 	}
-
-	mvm->ucode_loaded = false;
 }

 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
@@ -1762,6 +1763,50 @@ static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
 }
 #endif

+static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif)
+{
+	struct iwl_mu_group_mgmt_cmd cmd = {};
+
+	memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
+	       WLAN_MEMBERSHIP_LEN);
+	memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
+	       WLAN_USER_POSITION_LEN);
+
+	return iwl_mvm_send_cmd_pdu(mvm,
+				    WIDE_ID(DATA_PATH_GROUP,
+					    UPDATE_MU_GROUPS_CMD),
+				    0, sizeof(cmd), &cmd);
+}
+
+static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
+					   struct ieee80211_vif *vif)
+{
+	if (vif->mu_mimo_owner) {
+		struct iwl_mu_group_mgmt_notif *notif = _data;
+
+		/*
+		 * MU-MIMO Group Id action frame is little endian. We treat
+		 * the data received from firmware as if it came from the
+		 * action frame, so no conversion is needed.
+		 */
+		ieee80211_update_mu_groups(vif,
+					   (u8 *)&notif->membership_status,
+					   (u8 *)&notif->user_position);
+	}
+}
+
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
+			       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
+
+	ieee80211_iterate_active_interfaces_atomic(
+			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+			iwl_mvm_mu_mimo_iface_iterator, notif);
+}
+
 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 					     struct ieee80211_vif *vif,
 					     struct ieee80211_bss_conf *bss_conf,
@@ -1870,6 +1915,18 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 				vif->addr);
 		}

+		/*
+		 * The firmware tracks the MU-MIMO group on its own.
+		 * However, on HW restart we should restore this data.
+		 */
+		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+		    (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
+			ret = iwl_mvm_update_mu_groups(mvm, vif);
+			if (ret)
+				IWL_ERR(mvm,
+					"failed to update VHT MU_MIMO groups\n");
+		}
+
 		iwl_mvm_recalc_multicast(mvm);
 		iwl_mvm_configure_bcast_filter(mvm);
@@ -1896,7 +1953,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
 	}

-	if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
+	if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
+		       /*
+			* Send power command on every beacon change,
+			* because we may have not enabled beacon abort yet.
+			*/
+		       BSS_CHANGED_BEACON_INFO)) {
 		ret = iwl_mvm_power_update_mac(mvm);
 		if (ret)
 			IWL_ERR(mvm, "failed to update power mode\n");
@@ -2083,7 +2145,6 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
 				bss_conf->txpower);
 		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
 	}
-
 }

 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -2276,6 +2337,11 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
 		return;

+	if (vif->p2p && !iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) {
+		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+		return;
+	}
+
 	if (iwlwifi_mod_params.uapsd_disable) {
 		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
 		return;
 	}
...
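The "(group * 2)" wording in the user_position doc comment means each MU group owns two bits of the 128-bit field; a hedged decode sketch (the helper name is hypothetical, not code from this commit):

	/* Hypothetical sketch, not part of this commit */
	static u8 mu_user_position_example(struct iwl_mu_group_mgmt_notif *notif,
					   unsigned int group_id)
	{
		u32 word = le32_to_cpu(notif->user_position[(2 * group_id) / 32]);

		/* two bits per group: this STA's 0..3 slot within the group */
		return (word >> ((2 * group_id) % 32)) & 0x3;
	}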
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -71,6 +73,10 @@
 #include <linux/leds.h>
 #include <linux/in6.h>

+#ifdef CONFIG_THERMAL
+#include <linux/thermal.h>
+#endif
+
 #include "iwl-op-mode.h"
 #include "iwl-trans.h"
 #include "iwl-notif-wait.h"
@@ -487,6 +493,12 @@ enum iwl_mvm_scan_type {
 	IWL_SCAN_TYPE_FRAGMENTED,
 };

+enum iwl_mvm_sched_scan_pass_all_states {
+	SCHED_SCAN_PASS_ALL_DISABLED,
+	SCHED_SCAN_PASS_ALL_ENABLED,
+	SCHED_SCAN_PASS_ALL_FOUND,
+};
+
 /**
 * struct iwl_nvm_section - describes an NVM section in memory.
 *
@@ -517,6 +529,30 @@ struct iwl_mvm_tt_mgmt {
 	bool throttle;
 };

+#ifdef CONFIG_THERMAL
+/**
+ * struct iwl_mvm_thermal_device - thermal zone related data
+ * @temp_trips: temperature thresholds for report
+ * @fw_trips_index: keep indexes to original array - temp_trips
+ * @tzone: thermal zone device data
+ */
+struct iwl_mvm_thermal_device {
+	s16 temp_trips[IWL_MAX_DTS_TRIPS];
+	u8 fw_trips_index[IWL_MAX_DTS_TRIPS];
+	struct thermal_zone_device *tzone;
+};
+
+/*
+ * iwl_mvm_cooling_device
+ * @cur_state: current state in milliwatts
+ * @cdev: struct thermal cooling device
+ */
+struct iwl_mvm_cooling_device {
+	u32 cur_state;
+	struct thermal_cooling_device *cdev;
+};
+#endif
+
 #define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8

 struct iwl_mvm_frame_stats {
@@ -657,6 +693,7 @@ struct iwl_mvm {
 	void *scan_cmd;
 	struct iwl_mcast_filter_cmd *mcast_filter_cmd;
 	enum iwl_mvm_scan_type scan_type;
+	enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;

 	/* max number of simultaneous scans the FW supports */
 	unsigned int max_scans;
@@ -797,6 +834,11 @@ struct iwl_mvm {

 	/* Thermal Throttling and CTkill */
 	struct iwl_mvm_tt_mgmt thermal_throttle;
+#ifdef CONFIG_THERMAL
+	struct iwl_mvm_thermal_device tz_device;
+	struct iwl_mvm_cooling_device cooling_dev;
+#endif
+
 	s32 temperature;	/* Celsius */
 	/*
 	 * Debug option to set the NIC temperature. This option makes the
@@ -819,6 +861,7 @@ struct iwl_mvm {

 	/* Indicate if device power save is allowed */
 	u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
+	unsigned int max_amsdu_len; /* used for debugfs only */

 	struct ieee80211_vif __rcu *csa_vif;
 	struct ieee80211_vif __rcu *csa_tx_blocked_vif;
@@ -943,7 +986,8 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)

 static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
 {
-	return fw_has_capa(&mvm->fw->ucode_capa,
+	/* Make sure DQA isn't allowed in driver until feature is complete */
+	return false && fw_has_capa(&mvm->fw->ucode_capa,
 			   IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
 }
@@ -1028,6 +1072,28 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
 			  IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
 }

+static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
+{
+#ifdef CONFIG_THERMAL
+	/* these two TLV are redundant since the responsibility to CT-kill by
+	 * FW happens only after we send at least one command of
+	 * temperature THs report.
+	 */
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) &&
+	       fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT);
+#else /* CONFIG_THERMAL */
+	return false;
+#endif /* CONFIG_THERMAL */
+}
+
+static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_CTDP_SUPPORT);
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];

 struct iwl_rate_info {
@@ -1160,6 +1226,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 			struct iwl_rx_cmd_buffer *rxb, int queue);
 void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
 			      struct iwl_rx_cmd_buffer *rxb, int queue);
+int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
+			    const u8 *data, u32 count);
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+			    int queue);
 void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
@@ -1203,6 +1273,10 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
 				     struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
 				    struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
+			       struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@@ -1244,6 +1318,9 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
 void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
 					      struct iwl_rx_cmd_buffer *rxb);

+/* Paging */
+void iwl_free_fw_paging(struct iwl_mvm *mvm);
+
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
@@ -1476,32 +1553,30 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
 }

-static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
-					  int mac80211_queue, int fifo,
-					  int sta_id, int tid, int frame_limit,
-					  u16 ssn, unsigned int wdg_timeout)
+static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 {
-	struct iwl_trans_txq_scd_cfg cfg = {
-		.fifo = fifo,
-		.sta_id = sta_id,
-		.tid = tid,
-		.frame_limit = frame_limit,
-		.aggregate = true,
-	};
-
-	iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
+	mvm->ucode_loaded = false;
+	iwl_trans_stop_device(mvm->trans);
 }

+/* Stop/start all mac queues in a given bitmap */
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
 void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
 			struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
-void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
+void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff);
+void iwl_mvm_thermal_exit(struct iwl_mvm *mvm);
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
 int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp);
+void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm);
+int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget);

 /* Location Aware Regulatory */
 struct iwl_mcc_update_resp *
...
...@@ -236,6 +236,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { ...@@ -236,6 +236,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION, RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
iwl_mvm_rx_ant_coupling_notif, true), iwl_mvm_rx_ant_coupling_notif, true),
RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
iwl_mvm_window_status_notif, false),
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true), RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),
...@@ -263,6 +266,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { ...@@ -263,6 +266,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true), RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
iwl_mvm_temp_notif, true), iwl_mvm_temp_notif, true),
RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
iwl_mvm_ct_kill_notif, false),
RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
true), true),
...@@ -270,6 +275,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { ...@@ -270,6 +275,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true), RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
iwl_mvm_rx_stored_beacon_notif, false), iwl_mvm_rx_stored_beacon_notif, false),
RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
iwl_mvm_mu_mimo_grp_notif, false),
}; };
#undef RX_HANDLER #undef RX_HANDLER
...@@ -292,6 +299,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { ...@@ -292,6 +299,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(SCAN_COMPLETE_UMAC), HCMD_NAME(SCAN_COMPLETE_UMAC),
HCMD_NAME(TOF_CMD), HCMD_NAME(TOF_CMD),
HCMD_NAME(TOF_NOTIFICATION), HCMD_NAME(TOF_NOTIFICATION),
HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
HCMD_NAME(ADD_STA_KEY), HCMD_NAME(ADD_STA_KEY),
HCMD_NAME(ADD_STA), HCMD_NAME(ADD_STA),
HCMD_NAME(REMOVE_STA), HCMD_NAME(REMOVE_STA),
...@@ -387,9 +395,22 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { ...@@ -387,9 +395,22 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
*/ */
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
HCMD_NAME(CTDP_CONFIG_CMD),
HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
HCMD_NAME(CT_KILL_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
}; };
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
};
/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
@@ -401,6 +422,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
    [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
    [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
    [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
    [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
    [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
};
@@ -474,8 +496,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
    if (iwl_mvm_has_new_rx_api(mvm)) {
        op_mode->ops = &iwl_mvm_ops_mq;
        trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
    } else {
        op_mode->ops = &iwl_mvm_ops;
trans->rx_mpdu_cmd_hdr_size =
sizeof(struct iwl_rx_mpdu_res_start);
        if (WARN_ON(trans->num_rx_queues > 1))
            goto out_free;
@@ -567,7 +592,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
    iwl_trans_configure(mvm->trans, &trans_cfg);

    trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
    trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
    trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
    trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
    memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
@@ -588,7 +612,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
             mvm->cfg->name, mvm->trans->hw_rev);

    min_backoff = calc_min_backoff(trans, cfg);
    iwl_mvm_tt_initialize(mvm, min_backoff);
    iwl_mvm_thermal_initialize(mvm, min_backoff);
    if (iwlwifi_mod_params.nvm_file)
        mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
@@ -619,7 +643,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
    iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
    err = iwl_run_init_mvm_ucode(mvm, true);
    if (!err || !iwlmvm_mod_params.init_dbg)
        iwl_trans_stop_device(trans);
        iwl_mvm_stop_device(mvm);
    iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
    mutex_unlock(&mvm->mutex);
    /* returns 0 if successful, 1 if success but in rfkill */
@@ -648,19 +672,22 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
    memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));

    /* rpm starts with a taken reference, we can release it now */
    /* The transport always starts with a taken reference, we can
* release it now if d0i3 is supported */
if (iwl_mvm_is_d0i3_supported(mvm))
        iwl_trans_unref(mvm->trans);

    iwl_mvm_tof_init(mvm);

    /* init RSS hash key */
    get_random_bytes(mvm->secret_key, ARRAY_SIZE(mvm->secret_key));
    get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key));

    return op_mode;

 out_unregister:
    ieee80211_unregister_hw(mvm->hw);
    iwl_mvm_leds_exit(mvm);
    iwl_mvm_thermal_exit(mvm);
 out_free:
    flush_delayed_work(&mvm->fw_dump_wk);
    iwl_phy_db_free(mvm->phy_db);
@@ -676,9 +703,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
    struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
    int i;
/* If d0i3 is supported, we have released the reference that
* the transport started with, so we should take it back now
* that we are leaving.
*/
if (iwl_mvm_is_d0i3_supported(mvm))
iwl_trans_ref(mvm->trans);
    iwl_mvm_leds_exit(mvm);
    iwl_mvm_tt_exit(mvm);
    iwl_mvm_thermal_exit(mvm);

    ieee80211_unregister_hw(mvm->hw);
@@ -699,6 +733,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
    for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
        kfree(mvm->nvm_sections[i].data);

    iwl_free_fw_paging(mvm);

    iwl_mvm_tof_clean(mvm);

    ieee80211_free_hw(mvm->hw);
@@ -856,28 +892,24 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
        iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
    else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
        iwl_mvm_rx_phy_cmd_mq(mvm, rxb);
else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP &&
pkt->hdr.cmd == RX_QUEUES_NOTIFICATION))
iwl_mvm_rx_queue_notif(mvm, rxb, 0);
    else
        iwl_mvm_rx_common(mvm, rxb, pkt);
}

static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
    struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
    unsigned long mq;
    int q;

    spin_lock_bh(&mvm->queue_info_lock);
    mq = mvm->queue_info[queue].hw_queue_to_mac80211;
    spin_unlock_bh(&mvm->queue_info_lock);

    if (WARN_ON_ONCE(!mq))
        return;

    for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
        if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
            IWL_DEBUG_TX_QUEUES(mvm,
                                "queue %d (mac80211 %d) already stopped\n",
                                "mac80211 %d already stopped\n", q);
                                queue, q);
            continue;
        }
@@ -897,24 +929,29 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
    iwl_trans_block_txq_ptrs(mvm->trans, false);
}

static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
    struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
    unsigned long mq;
    int q;

    spin_lock_bh(&mvm->queue_info_lock);
    mq = mvm->queue_info[queue].hw_queue_to_mac80211;
    mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
    spin_unlock_bh(&mvm->queue_info_lock);
iwl_mvm_stop_mac_queues(mvm, mq);
}
void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
int q;
    if (WARN_ON_ONCE(!mq))
        return;

    for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
        if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
            IWL_DEBUG_TX_QUEUES(mvm,
                                "queue %d (mac80211 %d) still stopped\n",
                                "mac80211 %d still stopped\n", q);
                                queue, q);
            continue;
        }
@@ -922,6 +959,18 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
    }
}
static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
unsigned long mq;
spin_lock_bh(&mvm->queue_info_lock);
mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
spin_unlock_bh(&mvm->queue_info_lock);
iwl_mvm_start_mac_queues(mvm, mq);
}
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
{
    if (state)
@@ -1528,6 +1577,9 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
    if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
        iwl_mvm_rx_frame_release(mvm, rxb, queue);
else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
pkt->hdr.group_id == DATA_PATH_GROUP))
iwl_mvm_rx_queue_notif(mvm, rxb, queue);
    else
        iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}
...
@@ -556,6 +556,7 @@ static char *rs_pretty_rate(const struct rs_rate *rate)
    if (is_type_legacy(rate->type) && (rate->index <= IWL_RATE_54M_INDEX))
        rate_str = legacy_rates[rate->index];
    else if ((is_type_ht(rate->type) || is_type_vht(rate->type)) &&
             (rate->index >= IWL_RATE_MCS_0_INDEX) &&
             (rate->index <= IWL_RATE_MCS_9_INDEX))
        rate_str = ht_vht_rates[rate->index];
    else
@@ -1672,6 +1673,20 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
    }
}
static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl,
enum rs_action scale_action)
{
struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) ||
tbl->rate.index < IWL_RATE_MCS_5_INDEX ||
scale_action == RS_ACTION_DOWNSCALE)
sta_priv->tlc_amsdu = false;
else
sta_priv->tlc_amsdu = true;
}
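/* Annotation (not part of the commit): the rule above only turns
 * TLC-driven A-MSDU on where aggregation is worth it, i.e.:
 *   legacy rate, any scale action        -> tlc_amsdu = false
 *   HT/VHT below MCS5, any action        -> tlc_amsdu = false
 *   HT/VHT MCS5..9, RS_ACTION_DOWNSCALE  -> tlc_amsdu = false
 *   HT/VHT MCS5..9, stay or upscale      -> tlc_amsdu = true
 */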
/*
 * setup rate table in uCode
 */
@@ -2415,6 +2430,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
    tbl->rate.index = index;
    if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
        rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
    rs_set_amsdu_len(mvm, sta, tbl, scale_action);
    rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
}
@@ -3098,6 +3114,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
    sband = hw->wiphy->bands[band];

    lq_sta->lq.sta_id = sta_priv->sta_id;
    sta_priv->tlc_amsdu = false;

    for (j = 0; j < LQ_SIZE; j++)
        rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
@@ -3657,10 +3674,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
    ssize_t ret;

    struct iwl_lq_sta *lq_sta = file->private_data;
    struct iwl_mvm_sta *mvmsta =
        container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
    struct iwl_mvm *mvm;
    struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
    struct rs_rate *rate = &tbl->rate;
    u32 ss_params;

    mvm = lq_sta->pers.drv;
    buff = kmalloc(2048, GFP_KERNEL);
    if (!buff)
@@ -3686,10 +3706,11 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
                (is_ht20(rate)) ? "20MHz" :
                (is_ht40(rate)) ? "40MHz" :
                (is_ht80(rate)) ? "80Mhz" : "BAD BW");
        desc += sprintf(buff + desc, " %s %s %s\n",
        desc += sprintf(buff + desc, " %s %s %s %s\n",
                (rate->sgi) ? "SGI" : "NGI",
                (rate->ldpc) ? "LDPC" : "BCC",
                (lq_sta->is_agg) ? "AGG on" : "");
                (lq_sta->is_agg) ? "AGG on" : "",
                (mvmsta->tlc_amsdu) ? "AMSDU on" : "");
    }
    desc += sprintf(buff+desc, "last tx rate=0x%X\n",
            lq_sta->last_rate_n_flags);
...
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -322,11 +323,9 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
    rx_status->freq =
        ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
                                       rx_status->band);
    /*
     * TSF as indicated by the fw is at INA time, but mac80211 expects the
     * TSF at the beginning of the MPDU.
     */
    /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/
    /* TSF as indicated by the firmware is at INA time */
    rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;

    iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
@@ -448,6 +447,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
        iwl_mvm_update_frame_stats(mvm, rate_n_flags,
                                   rx_status->flag & RX_FLAG_AMPDU_DETAILS);
#endif

    if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
                  ieee80211_is_probe_resp(hdr->frame_control)) &&
                 mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED))
        mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;

    iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
                                    crypt_len, rxb);
}
@@ -622,3 +627,51 @@ void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
    iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
}
void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_ba_window_status_notif *notif = (void *)pkt->data;
int i;
u32 pkt_len = iwl_rx_packet_payload_len(pkt);
if (WARN_ONCE(pkt_len != sizeof(*notif),
"Received window status notification of wrong size (%u)\n",
pkt_len))
return;
rcu_read_lock();
for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
struct ieee80211_sta *sta;
u8 sta_id, tid;
u64 bitmap;
u32 ssn;
u16 ratid;
u16 received_mpdu;
ratid = le16_to_cpu(notif->ra_tid[i]);
/* check that this TID is valid */
if (!(ratid & BA_WINDOW_STATUS_VALID_MSK))
continue;
received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]);
if (received_mpdu == 0)
continue;
tid = ratid & BA_WINDOW_STATUS_TID_MSK;
/* get the station */
sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK)
>> BA_WINDOW_STATUS_STA_ID_POS;
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (IS_ERR_OR_NULL(sta))
continue;
bitmap = le64_to_cpu(notif->bitmap[i]);
ssn = le32_to_cpu(notif->start_seq_num[i]);
/* update mac80211 with the bitmap for the reordering buffer */
ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap,
received_mpdu);
}
rcu_read_unlock();
}
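/* Annotation (not part of the commit): ra_tid packs the station/TID
 * pair into one 16-bit field; the decode sketched by the masks used
 * above is, schematically:
 *
 *   valid  =  ratid & BA_WINDOW_STATUS_VALID_MSK;
 *   tid    =  ratid & BA_WINDOW_STATUS_TID_MSK;
 *   sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK)
 *                   >> BA_WINDOW_STATUS_STA_ID_POS;
 *
 * The (ssn, bitmap, mpdu count) triple then tells mac80211 which
 * frames the firmware already released from the BA reorder window.
 */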
@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 Intel Deutschland GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 Intel Deutschland GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -156,7 +156,14 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
                               u16 len, u8 crypt_len,
                               struct iwl_rx_cmd_buffer *rxb)
{
    unsigned int hdrlen, fraglen;
    struct iwl_rx_packet *pkt = rxb_addr(rxb);
    struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
    unsigned int headlen, fraglen, pad_len = 0;
    unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);

    if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD)
        pad_len = 2;
    len -= pad_len;

    /* If frame is small enough to fit in skb->head, pull it completely.
     * If not, only pull ieee80211_hdr (including crypto if present, and
@@ -170,14 +177,23 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
     * If the latter changes (there are efforts in the standards group
     * to do so) we should revisit this and ieee80211_data_to_8023().
     */
    hdrlen = (len <= skb_tailroom(skb)) ? len :
             sizeof(*hdr) + crypt_len + 8;
    headlen = (len <= skb_tailroom(skb)) ? len :
              hdrlen + crypt_len + 8;

    /* The firmware may align the packet to DWORD.
     * The padding is inserted after the IV.
     * After copying the header + IV skip the padding if
     * present before copying packet data.
     */
    hdrlen += crypt_len;
    memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
    fraglen = len - hdrlen;
    memcpy(skb_put(skb, headlen - hdrlen), (u8 *)hdr + hdrlen + pad_len,
           headlen - hdrlen);
    fraglen = len - headlen;

    if (fraglen) {
        int offset = (void *)hdr + hdrlen -
        int offset = (void *)hdr + headlen + pad_len -
                     rxb_addr(rxb) + rxb_offset(rxb);

        skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
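/* Worked example (annotation, not part of the commit): a QoS-data
 * frame has hdrlen = 26; with IWL_RX_MPDU_MFLG2_PAD set, the firmware
 * inserted 2 bytes after the header(+IV) to DWORD-align the payload.
 * The first memcpy above copies hdrlen + crypt_len bytes, the second
 * resumes at hdr + hdrlen + pad_len, and fraglen/offset are computed
 * from the padded layout, so the pad bytes never reach mac80211.
 */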
@@ -285,6 +301,114 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
    skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/*
* returns true if a packet outside BA session is a duplicate and
* should be dropped
*/
static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
struct ieee80211_rx_status *rx_status,
struct ieee80211_hdr *hdr,
struct iwl_rx_mpdu_desc *desc)
{
struct iwl_mvm_sta *mvm_sta;
struct iwl_mvm_rxq_dup_data *dup_data;
u8 baid, tid, sub_frame_idx;
if (WARN_ON(IS_ERR_OR_NULL(sta)))
return false;
baid = (le32_to_cpu(desc->reorder_data) &
IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT;
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
return false;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
dup_data = &mvm_sta->dup_data[queue];
/*
* Drop duplicate 802.11 retransmissions
* (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
*/
if (ieee80211_is_ctl(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1)) {
rx_status->flag |= RX_FLAG_DUP_VALIDATED;
return false;
}
if (ieee80211_is_data_qos(hdr->frame_control))
/* frame has qos control */
tid = *ieee80211_get_qos_ctl(hdr) &
IEEE80211_QOS_CTL_TID_MASK;
else
tid = IWL_MAX_TID_COUNT;
/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
dup_data->last_seq[tid] == hdr->seq_ctrl &&
dup_data->last_sub_frame[tid] >= sub_frame_idx))
return true;
dup_data->last_seq[tid] = hdr->seq_ctrl;
dup_data->last_sub_frame[tid] = sub_frame_idx;
rx_status->flag |= RX_FLAG_DUP_VALIDATED;
return false;
}
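/* Minimal model of the rule implemented above (annotation, not part
 * of the commit; kernel integer types assumed):
 */
#if 0
struct dup_state { __le16 last_seq; u8 last_sub_frame; };

static bool is_dup(struct dup_state *s, __le16 seq_ctrl, u8 sub_idx,
                   bool retry)
{
    /* drop only a retry whose sequence control repeats and whose
     * A-MSDU sub-frame index does not advance
     */
    if (retry && s->last_seq == seq_ctrl && s->last_sub_frame >= sub_idx)
        return true;
    s->last_seq = seq_ctrl;
    s->last_sub_frame = sub_idx;
    return false;
}
#endif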
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
const u8 *data, u32 count)
{
struct iwl_rxq_sync_cmd *cmd;
u32 data_size = sizeof(*cmd) + count;
int ret;
/* should be DWORD aligned */
if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
return -EINVAL;
cmd = kzalloc(data_size, GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->rxq_mask = cpu_to_le32(rxq_mask);
cmd->count = cpu_to_le32(count);
cmd->flags = 0;
memcpy(cmd->payload, data, count);
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(DATA_PATH_GROUP,
TRIGGER_RX_QUEUES_NOTIF_CMD),
0, data_size, cmd);
kfree(cmd);
return ret;
}
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
int queue)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rxq_sync_notification *notif;
struct iwl_mvm_internal_rxq_notif *internal_notif;
notif = (void *)pkt->data;
internal_notif = (void *)notif->payload;
switch (internal_notif->type) {
case IWL_MVM_RXQ_NOTIF_DEL_BA:
/* TODO */
break;
default:
WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
}
}
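/* Hypothetical usage sketch (annotation, not part of the commit):
 * broadcast an internal DEL_BA sync message to every RX queue. The
 * payload size is assumed DWORD-aligned here; real callers must
 * honour the (count & 3) check in iwl_mvm_notify_rx_queue().
 */
#if 0
static int example_sync_del_ba(struct iwl_mvm *mvm)
{
    struct iwl_mvm_internal_rxq_notif notif = {
        .type = IWL_MVM_RXQ_NOTIF_DEL_BA,
    };

    return iwl_mvm_notify_rx_queue(mvm, BIT(mvm->trans->num_rx_queues) - 1,
                                   (u8 *)&notif, sizeof(notif));
}
#endif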
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                        struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -332,6 +456,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
    rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
                                                     rx_status->band);
    iwl_mvm_get_signal_strength(mvm, desc, rx_status);
    /* TSF as indicated by the firmware is at INA time */
    rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;

    rcu_read_lock();
@@ -387,6 +513,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
        if (ieee80211_is_data(hdr->frame_control))
            iwl_mvm_rx_csum(sta, skb, desc);

        if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
            kfree_skb(skb);
            rcu_read_unlock();
            return;
        }
    }

    /*
...
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -297,6 +299,12 @@ void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
               iwl_mvm_dump_channel_list(notif->results,
                                         notif->scanned_channels, buf,
                                         sizeof(buf)));
if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
ieee80211_sched_scan_results(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
}
}
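/* Annotation (not part of the commit): the pass-all bookkeeping added
 * by this patch forms a small state machine, with the iteration-
 * complete notifications above acting as the reporting edge:
 *
 *   DISABLED --(sched scan started without match sets)--> ENABLED
 *   ENABLED  --(beacon/probe resp received)-------------> FOUND
 *   FOUND    --(iter complete, sched_scan_results())----> ENABLED
 *   any      --(sched scan stopped or aborted)----------> DISABLED
 */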

void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
@@ -380,6 +388,7 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
        mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
        ieee80211_sched_scan_stopped(mvm->hw);
        mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
    } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
        IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
                       aborted ? "aborted" : "completed",
@@ -533,10 +542,13 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
        IWL_DEBUG_SCAN(mvm,
                       "Sending scheduled scan with filtering, n_match_sets %d\n",
                       req->n_match_sets);
        mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
        return false;
    }

    IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
    mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
    return true;
}
@@ -788,6 +800,9 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
        flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
#endif

    if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
        flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;

    if (iwl_mvm_is_regular_scan(params) &&
        vif->type != NL80211_IFTYPE_P2P_DEVICE &&
        params->type != IWL_SCAN_TYPE_FRAGMENTED)
@@ -1074,6 +1089,9 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
        flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
#endif

    if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
        flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;

    if (iwl_mvm_is_regular_scan(params) &&
        vif->type != NL80211_IFTYPE_P2P_DEVICE &&
        params->type != IWL_SCAN_TYPE_FRAGMENTED)
@@ -1301,10 +1319,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
        return -EBUSY;
    }
/* we don't support "match all" in the firmware */
if (!req->n_match_sets)
return -EOPNOTSUPP;
    ret = iwl_mvm_check_running_scans(mvm, type);
    if (ret)
        return ret;
@@ -1400,6 +1414,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
        iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
    } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
        ieee80211_sched_scan_stopped(mvm->hw);
        mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
    }

    mvm->scan_status &= ~mvm->scan_uid_status[uid];
@@ -1434,6 +1449,12 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
               iwl_mvm_dump_channel_list(notif->results,
                                         notif->scanned_channels, buf,
                                         sizeof(buf)));
if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
ieee80211_sched_scan_results(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
}
}

static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
@@ -1528,6 +1549,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
    uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
    if (uid >= 0 && !mvm->restart_fw) {
        ieee80211_sched_scan_stopped(mvm->hw);
        mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
        mvm->scan_uid_status[uid] = 0;
    }
@@ -1549,8 +1571,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
         * restart_hw, so do not report if FW is about to be
         * restarted.
         */
        if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw)
        if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
            !mvm->restart_fw) {
            ieee80211_sched_scan_stopped(mvm->hw);
            mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
        }
    }
}
@@ -1586,6 +1611,7 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
        ieee80211_scan_completed(mvm->hw, true);
    } else if (notify) {
        ieee80211_sched_scan_stopped(mvm->hw);
        mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
    }

    return ret;
...
@@ -280,6 +280,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
    struct iwl_mvm_rxq_dup_data *dup_data;
    int i, ret, sta_id;

    lockdep_assert_held(&mvm->mutex);
@@ -327,6 +328,16 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
    }
    mvm_sta->agg_tids = 0;
if (iwl_mvm_has_new_rx_api(mvm) &&
!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
dup_data = kcalloc(mvm->trans->num_rx_queues,
sizeof(*dup_data),
GFP_KERNEL);
if (!dup_data)
return -ENOMEM;
mvm_sta->dup_data = dup_data;
}
    ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
    if (ret)
        goto err;
@@ -508,6 +519,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
    lockdep_assert_held(&mvm->mutex);

    if (iwl_mvm_has_new_rx_api(mvm))
        kfree(mvm_sta->dup_data);

    if (vif->type == NL80211_IFTYPE_STATION &&
        mvmvif->ap_sta_id == mvm_sta->sta_id) {
        ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
@@ -1031,15 +1045,23 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta, u16 tid, u8 buf_size)
                            struct ieee80211_sta *sta, u16 tid, u8 buf_size,
                            bool amsdu)
{
    struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
    struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
    unsigned int wdg_timeout =
        iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
    int queue, fifo, ret;
    int queue, ret;
    u16 ssn;
struct iwl_trans_txq_scd_cfg cfg = {
.sta_id = mvmsta->sta_id,
.tid = tid,
.frame_limit = buf_size,
.aggregate = true,
};
    BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
                 != IWL_MAX_TID_COUNT);
@@ -1051,13 +1073,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
    tid_data->state = IWL_AGG_ON;
    mvmsta->agg_tids |= BIT(tid);
    tid_data->ssn = 0xffff;
    tid_data->amsdu_in_ampdu_allowed = amsdu;
    spin_unlock_bh(&mvmsta->lock);

    fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
    cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

    iwl_mvm_enable_agg_txq(mvm, queue,
                           vif->hw_queue[tid_to_mac80211_ac[tid]], fifo,
                           mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout);
    iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]],
                       ssn, &cfg, wdg_timeout);

    ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
    if (ret)
...
@@ -258,8 +258,7 @@ enum iwl_mvm_agg_state {
 *	This is basically (last acked packet++).
 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
 *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
 * @reduced_tpc: Reduced tx power. Holds the data between the
 *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
 * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
 * @state: state of the BA agreement establishment / tear down.
 * @txq_id: Tx queue used by the BA session
 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
@@ -273,7 +272,7 @@ struct iwl_mvm_tid_data {
    u16 next_reclaimed;
    /* The rest is Tx AGG related */
    u32 rate_n_flags;
    u8 reduced_tpc;
    bool amsdu_in_ampdu_allowed;
    enum iwl_mvm_agg_state state;
    u16 txq_id;
    u16 ssn;
@@ -293,6 +292,16 @@ struct iwl_mvm_key_pn {
    } ____cacheline_aligned_in_smp q[];
};
/**
* struct iwl_mvm_rxq_dup_data - per station per rx queue data
* @last_seq: last sequence per tid for duplicate packet detection
* @last_sub_frame: last subframe packet
*/
struct iwl_mvm_rxq_dup_data {
__le16 last_seq[IWL_MAX_TID_COUNT + 1];
u8 last_sub_frame[IWL_MAX_TID_COUNT + 1];
} ____cacheline_aligned_in_smp;
/**
 * struct iwl_mvm_sta - representation of a station in the driver
 * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
@@ -311,6 +320,7 @@ struct iwl_mvm_key_pn {
 * @tx_protection: reference counter for controlling the Tx protection.
 * @tt_tx_protection: is thermal throttling enable Tx protection?
 * @disable_tx: is tx to this STA disabled?
 * @tlc_amsdu: true if A-MSDU is allowed
 * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
 * @sleep_tx_count: the number of frames that we told the firmware to let out
 *	even when that station is asleep. This is useful in case the queue
@@ -318,6 +328,7 @@ struct iwl_mvm_key_pn {
 *	we are sending frames from an AMPDU queue and there was a hole in
 *	the BA window. To be used for UAPSD only.
 * @ptk_pn: per-queue PTK PN data structures
 * @dup_data: per queue duplicate packet detection data
 *
 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
 * in the structure for use by driver. This structure is placed in that
@@ -337,14 +348,15 @@ struct iwl_mvm_sta {
    struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
    struct iwl_lq_sta lq_sta;
    struct ieee80211_vif *vif;
    struct iwl_mvm_key_pn __rcu *ptk_pn[4];
    struct iwl_mvm_rxq_dup_data *dup_data;

    /* Temporary, until the new TLC will control the Tx protection */
    s8 tx_protection;
    bool tt_tx_protection;

    bool disable_tx;
    bool tlc_amsdu;
    u8 agg_tids;
    u8 sleep_tx_count;
};
@@ -405,7 +417,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta, u16 tid, u8 buf_size);
                            struct ieee80211_sta *sta, u16 tid, u8 buf_size,
                            bool amsdu);
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta, u16 tid);
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
...
@@ -65,6 +65,7 @@
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>

#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
@@ -182,7 +183,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
    tx_cmd->tx_flags = cpu_to_le32(tx_flags);
    /* Total # bytes to be transmitted */
    tx_cmd->len = cpu_to_le16((u16)skb->len);
    tx_cmd->len = cpu_to_le16((u16)skb->len +
                              (uintptr_t)info->driver_data[0]);
    tx_cmd->next_frame_len = 0;
    tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
    tx_cmd->sta_id = sta_id;
@@ -372,6 +374,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                     info->hw_queue != info->control.vif->cab_queue)))
        return -1;
/* This holds the amsdu headers length */
info->driver_data[0] = (void *)(uintptr_t)0;
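/* Annotation (not part of the commit): driver_data[0] reuses the
 * pointer slot as a plain integer. iwl_mvm_tx_tso() stores the number
 * of bytes its A-MSDU headers will add, and iwl_mvm_set_tx_cmd() adds
 * it back via (uintptr_t)info->driver_data[0], so tx_cmd->len accounts
 * for headers that do not exist in the skb yet. Zero means no A-MSDU.
 */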
    /*
     * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
     * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
@@ -425,36 +430,206 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        return -1;
    }
/*
* Increase the pending frames counter, so that later when a reply comes
* in and the counter is decreased - we don't start getting negative
* values.
* Note that we don't need to make sure it isn't agg'd, since we're
* TXing non-sta
*/
atomic_inc(&mvm->pending_frames[sta_id]);
    return 0;
}

static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb_gso,
#ifdef CONFIG_INET
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
                          struct ieee80211_sta *sta,
                          struct sk_buff_head *mpdus_skb)
{
    struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
    struct ieee80211_hdr *hdr = (void *)skb->data;
    unsigned int mss = skb_shinfo(skb)->gso_size;
    struct sk_buff *tmp, *next;
    char cb[sizeof(skb_gso->cb)];
    char cb[sizeof(skb->cb)];
unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
bool ipv4 = (skb->protocol == htons(ETH_P_IP));
u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
u16 amsdu_add, snap_ip_tcp, pad, i = 0;
unsigned int dbg_max_amsdu_len;
u8 *qc, tid, txf;
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
tcp_hdrlen(skb);
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
if (!sta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
!mvmsta->tlc_amsdu) {
num_subframes = 1;
pad = 0;
goto segment;
}
/*
* No need to lock amsdu_in_ampdu_allowed since it can't be modified
* during an BA session.
*/
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
!mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) {
num_subframes = 1;
pad = 0;
goto segment;
}
max_amsdu_len = sta->max_amsdu_len;
dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
/* the Tx FIFO to which this A-MSDU will be routed */
txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
/*
* Don't send an AMSDU that will be longer than the TXF.
* Add a security margin of 256 for the TX command + headers.
* We also want to have the start of the next packet inside the
* fifo to be able to send bursts.
*/
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
mvm->shared_mem_cfg.txfifo_size[txf] - 256);
if (dbg_max_amsdu_len)
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
dbg_max_amsdu_len);
/*
* Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
* supported. This is a spec requirement (IEEE 802.11-2015
* section 8.7.3 NOTE 3).
*/
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
!sta->vht_cap.vht_supported)
max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
/* Sub frame header + SNAP + IP header + TCP header + MSS */
subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
pad = (4 - subf_len) & 0x3;
/*
* If we have N subframes in the A-MSDU, then the A-MSDU's size is
* N * subf_len + (N - 1) * pad.
*/
num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
if (num_subframes > 1)
*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
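/* Worked example (annotation, not part of the commit), IPv4 with no
 * IP/TCP options: mss = 1460, snap_ip_tcp = 8 + 20 + 20 = 48, so
 * subf_len = 14 + 48 + 1460 = 1522 and pad = (4 - 1522) & 0x3 = 2.
 * With max_amsdu_len = 7935, num_subframes = (7935 + 2) / (1522 + 2)
 * = 5 subframes per A-MSDU.
 */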
tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
tcp_hdrlen(skb) + skb->data_len;
/*
* Make sure we have enough TBs for the A-MSDU:
* 2 for each subframe
* 1 more for each fragment
* 1 more for the potential data in the header
*/
num_subframes =
min_t(unsigned int, num_subframes,
(mvm->trans->max_skb_frags - 1 -
skb_shinfo(skb)->nr_frags) / 2);
/* This skb fits in one single A-MSDU */
if (num_subframes * mss >= tcp_payload_len) {
/*
* Compute the length of all the data added for the A-MSDU.
* This will be used to compute the length to write in the TX
* command. We have: SNAP + IP + TCP for n -1 subframes and
* ETH header for n subframes. Note that the original skb
* already had one set of SNAP / IP / TCP headers.
*/
num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
info = IEEE80211_SKB_CB(skb);
amsdu_add = num_subframes * sizeof(struct ethhdr) +
(num_subframes - 1) * (snap_ip_tcp + pad);
/* This holds the amsdu headers length */
info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
__skb_queue_tail(mpdus_skb, skb);
return 0;
}
/*
* Trick the segmentation function to make it
* create SKBs that can fit into one A-MSDU.
*/
segment:
skb_shinfo(skb)->gso_size = num_subframes * mss;
memcpy(cb, skb->cb, sizeof(cb));
    memcpy(cb, skb_gso->cb, sizeof(cb));
    next = skb_gso_segment(skb_gso, 0);
    next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG);
    skb_shinfo(skb)->gso_size = mss;
    if (IS_ERR(next))
    if (WARN_ON_ONCE(IS_ERR(next)))
        return -EINVAL;
    else if (next)
        consume_skb(skb_gso);
        consume_skb(skb);

    while (next) {
        tmp = next;
        next = tmp->next;

        memcpy(tmp->cb, cb, sizeof(tmp->cb));
/*
* Compute the length of all the data added for the A-MSDU.
* This will be used to compute the length to write in the TX
* command. We have: SNAP + IP + TCP for n -1 subframes and
* ETH header for n subframes.
*/
tcp_payload_len = skb_tail_pointer(tmp) -
skb_transport_header(tmp) -
tcp_hdrlen(tmp) + tmp->data_len;
if (ipv4)
ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
if (tcp_payload_len > mss) {
num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
info = IEEE80211_SKB_CB(tmp);
amsdu_add = num_subframes * sizeof(struct ethhdr) +
(num_subframes - 1) * (snap_ip_tcp + pad);
info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
skb_shinfo(tmp)->gso_size = mss;
} else {
qc = ieee80211_get_qos_ctl((void *)tmp->data);
if (ipv4)
ip_send_check(ip_hdr(tmp));
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
skb_shinfo(tmp)->gso_size = 0;
}
        tmp->prev = NULL;
        tmp->next = NULL;

        __skb_queue_tail(mpdus_skb, tmp);
        i++;
    }

    return 0;
}
#else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta,
struct sk_buff_head *mpdus_skb)
{
/* Impossible to get TSO with CONFIG_INET */
WARN_ON(1);
return -1;
}
#endif
/*
 * Sets the fields in the Tx cmd that are crypto related
@@ -560,6 +735,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
                   struct ieee80211_sta *sta)
{
    struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
    struct sk_buff_head mpdus_skbs;
    unsigned int payload_len;
    int ret;
@@ -570,6 +746,9 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
    if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
        return -1;

    /* This holds the amsdu headers length */
    info->driver_data[0] = (void *)(uintptr_t)0;

    if (!skb_is_gso(skb))
        return iwl_mvm_tx_mpdu(mvm, skb, sta);
@@ -589,7 +768,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        return ret;

    while (!skb_queue_empty(&mpdus_skbs)) {
        struct sk_buff *skb = __skb_dequeue(&mpdus_skbs);
        skb = __skb_dequeue(&mpdus_skbs);

        ret = iwl_mvm_tx_mpdu(mvm, skb, sta);
        if (ret) {
...
@@ -811,6 +811,45 @@ static int iwl_pci_runtime_resume(struct device *device)
    return 0;
}
static int iwl_pci_system_prepare(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct iwl_trans *trans = pci_get_drvdata(pdev);
IWL_DEBUG_RPM(trans, "preparing for system suspend\n");
/* This is called before entering system suspend and before
* the runtime resume is called. Set the suspending flag to
* prevent the wakelock from being taken.
*/
trans->suspending = true;
/* Wake the device up from runtime suspend before going to
* platform suspend. This is needed because we don't know
* whether wowlan any is set and, if it's not, mac80211 will
* disconnect (in which case, we can't be in D0i3).
*/
pm_runtime_resume(device);
return 0;
}
static void iwl_pci_system_complete(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct iwl_trans *trans = pci_get_drvdata(pdev);
IWL_DEBUG_RPM(trans, "completing system suspend\n");
/* This is called as a counterpart to the prepare op. It is
* called either when suspending fails or when suspend
* completed successfully. Now there's no risk of grabbing
* the wakelock anymore, so we can release the suspending
* flag.
*/
trans->suspending = false;
}
#endif /* CONFIG_IWLWIFI_PCIE_RTPM */

static const struct dev_pm_ops iwl_dev_pm_ops = {
@@ -820,6 +859,8 @@ static const struct dev_pm_ops iwl_dev_pm_ops = {
    SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend,
                       iwl_pci_runtime_resume,
                       NULL)
.prepare = iwl_pci_system_prepare,
.complete = iwl_pci_system_complete,
#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ #endif /* CONFIG_IWLWIFI_PCIE_RTPM */
}; };
......
...@@ -336,6 +336,14 @@ struct iwl_tso_hdr_page { ...@@ -336,6 +336,14 @@ struct iwl_tso_hdr_page {
* @fw_mon_phys: physical address of the buffer for the firmware monitor * @fw_mon_phys: physical address of the buffer for the firmware monitor
* @fw_mon_page: points to the first page of the buffer for the firmware monitor * @fw_mon_page: points to the first page of the buffer for the firmware monitor
* @fw_mon_size: size of the buffer for the firmware monitor * @fw_mon_size: size of the buffer for the firmware monitor
* @msix_entries: array of MSI-X entries
* @msix_enabled: true if managed to enable MSI-X
* @allocated_vector: the number of interrupt vector allocated by the OS
* @default_irq_num: default irq for non rx interrupt
* @fh_init_mask: initial unmasked fh causes
* @hw_init_mask: initial unmasked hw causes
* @fh_mask: current unmasked fh causes
* @hw_mask: current unmasked hw causes
*/ */
struct iwl_trans_pcie { struct iwl_trans_pcie {
struct iwl_rxq *rxq; struct iwl_rxq *rxq;
...@@ -402,6 +410,15 @@ struct iwl_trans_pcie { ...@@ -402,6 +410,15 @@ struct iwl_trans_pcie {
dma_addr_t fw_mon_phys; dma_addr_t fw_mon_phys;
struct page *fw_mon_page; struct page *fw_mon_page;
u32 fw_mon_size; u32 fw_mon_size;
struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
bool msix_enabled;
u32 allocated_vector;
u32 default_irq_num;
u32 fh_init_mask;
u32 hw_init_mask;
u32 fh_mask;
u32 hw_mask;
}; };
static inline struct iwl_trans_pcie * static inline struct iwl_trans_pcie *
...@@ -430,7 +447,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans); ...@@ -430,7 +447,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
* RX * RX
******************************************************/ ******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans); int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id); irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans); int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans); void iwl_pcie_rx_free(struct iwl_trans *trans);
...@@ -485,8 +505,10 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans); ...@@ -485,8 +505,10 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans);
******************************************************/ ******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans) static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{ {
clear_bit(STATUS_INT_ENABLED, &trans->status); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
clear_bit(STATUS_INT_ENABLED, &trans->status);
if (!trans_pcie->msix_enabled) {
/* disable interrupts from uCode/NIC to host */
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
...@@ -494,6 +516,13 @@ static inline void iwl_disable_interrupts(struct iwl_trans *trans)
* from uCode or flow handler (Rx/Tx DMA) */
iwl_write32(trans, CSR_INT, 0xffffffff);
iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
} else {
/* disable all the interrupts we might use */
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
trans_pcie->fh_init_mask);
iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
trans_pcie->hw_init_mask);
}
IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
} }
...@@ -503,8 +532,37 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans) ...@@ -503,8 +532,37 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Enabling interrupts\n"); IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
set_bit(STATUS_INT_ENABLED, &trans->status); set_bit(STATUS_INT_ENABLED, &trans->status);
if (!trans_pcie->msix_enabled) {
trans_pcie->inta_mask = CSR_INI_SET_MASK;
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
} else {
/*
* fh_mask/hw_mask hold the currently unmasked causes.
* Unlike MSI, with MSI-X a cause is enabled when its mask bit
* is unset.
*/
trans_pcie->hw_mask = trans_pcie->hw_init_mask;
trans_pcie->fh_mask = trans_pcie->fh_init_mask;
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
~trans_pcie->fh_mask);
iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
~trans_pcie->hw_mask);
}
}
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
trans_pcie->hw_mask = msk;
}
static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
trans_pcie->fh_mask = msk;
}
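Usage sketch (not part of the patch): because an MSI-X cause is live while its mask bit is 0, enabling exactly one cause means writing the complement of that single bit, which is what the two helpers above wrap. The function below merely mirrors the MSI-X branch of iwl_enable_rfkill_int further down and is illustrative only:

static inline void example_enable_only_rfkill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Return all FH (DMA) causes to their initial masked state... */
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, trans_pcie->fh_init_mask);
	/* ...and leave only the RF-kill HW cause unmasked. */
	iwl_enable_hw_int_msk_msix(trans, MSIX_HW_INT_CAUSES_REG_RF_KILL);
}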
static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
...@@ -512,8 +570,15 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
if (!trans_pcie->msix_enabled) {
trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
} else {
iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
trans_pcie->hw_init_mask);
iwl_enable_fh_int_msk_msix(trans,
MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
}
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
...@@ -521,8 +586,15 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
if (!trans_pcie->msix_enabled) {
trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
} else {
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
trans_pcie->fh_init_mask);
iwl_enable_hw_int_msk_msix(trans,
MSIX_HW_INT_CAUSES_REG_RF_KILL);
}
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
......
...@@ -783,16 +783,26 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
* Single frame mode
* Rx buffer size 4 or 8k or 12k
* Min RB size 4 or 8
* Drop frames that exceed RB size
* 512 RBDs
*/
iwl_write_prph(trans, RFH_RXF_DMA_CFG,
RFH_DMA_EN_ENABLE_VAL |
rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
RFH_RXF_DMA_MIN_RB_4_8 |
RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
RFH_RXF_DMA_RBDCB_SIZE_512);
/*
* Activate DMA snooping.
* Set RX DMA chunk size to 128 bit
* Default queue is 0
*/
iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
RFH_GEN_CFG_RB_CHUNK_SIZE |
(DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
RFH_GEN_CFG_SERVICE_DMA_SNOOP);
/* Enable the relevant rx queues */
iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
/* Set interrupt coalescing timer to default (2048 usecs) */
...@@ -1135,10 +1145,10 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
/*
* iwl_pcie_rx_handle - Main entry function for receiving responses from fw
*/
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
u32 r, i, j, count = 0;
bool emergency = false;
...@@ -1149,9 +1159,12 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
i = rxq->read;
/* W/A 9000 device step A0 wrap-around bug */
r &= (rxq->queue_size - 1);
/* Rx interrupt, but nothing sent from uCode */
if (i == r)
IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
while (i != r) {
struct iwl_rx_mem_buffer *rxb;
...@@ -1164,15 +1177,18 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
* used_bd is 32 bits wide, but only 12 of them are used
* to retrieve the vid
*/
u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table),
"Invalid rxb index from HW %u\n", (u32)vid))
goto out;
rxb = trans_pcie->global_table[vid];
} else {
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
}
IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
i = (i + 1) & (rxq->queue_size - 1);
...@@ -1235,7 +1251,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
goto restart;
}
}
out:
/* Backtrack one entry */
rxq->read = i;
spin_unlock(&rxq->lock);
...@@ -1259,6 +1275,54 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
napi_gro_flush(&rxq->napi, false);
}
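A toy userspace-C illustration (assuming a power-of-two ring, as the hardware uses) of the index arithmetic in iwl_pcie_rx_handle(): masking with (size - 1) both advances the software index and clamps a hardware index that may run past the ring size, which is all the A0 wrap-around workaround above does:

#include <stdio.h>

int main(void)
{
	unsigned int size = 512;		/* ring size, power of two */
	unsigned int i = 510;			/* software read index */
	unsigned int r = 515 & (size - 1);	/* HW index 515 wraps to 3 */

	while (i != r) {
		printf("process entry %u\n", i);
		i = (i + 1) & (size - 1);	/* visits 510, 511, 0, 1, 2 */
	}
	return 0;
}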
static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
{
u8 queue = entry->entry;
struct msix_entry *entries = entry - queue;
return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}
static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
struct msix_entry *entry)
{
/*
* Before sending the interrupt the HW disables it to prevent
* a nested interrupt. This is done by writing 1 to the corresponding
* bit in the mask register. After handling the interrupt, it should be
* re-enabled by clearing this bit. The register is defined as
* write 1 clear (W1C), meaning that the bit is cleared by writing 1
* to it.
*/
iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}
/*
* iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
* This interrupt handler should be used with RSS queues only.
*/
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
{
struct msix_entry *entry = dev_id;
struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
struct iwl_trans *trans = trans_pcie->trans;
if (WARN_ON(entry->entry >= trans->num_rx_queues))
return IRQ_NONE;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
local_bh_disable();
iwl_pcie_rx_handle(trans, entry->entry);
local_bh_enable();
iwl_pcie_clear_irq(trans, entry);
lock_map_release(&trans->sync_cmd_lockdep_map);
return IRQ_HANDLED;
}
/*
* iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
*/
...@@ -1589,7 +1653,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
isr_stats->rx++;
local_bh_disable();
iwl_pcie_rx_handle(trans, 0);
local_bh_enable();
}
...@@ -1732,3 +1796,129 @@ irqreturn_t iwl_pcie_isr(int irq, void *data)
return IRQ_WAKE_THREAD;
}
irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
return IRQ_WAKE_THREAD;
}
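The code that actually binds these handlers to interrupt vectors lives in the PCIe transport setup and is not shown in this hunk. A reduced sketch of what such wiring looks like with the 4.5-era PCI API (hypothetical function name, error unwinding omitted, vector roles simplified): each vector gets iwl_pcie_msix_isr as the hard handler and one of the two threaded handlers above as thread_fn, with the msix_entry itself passed as dev_id so iwl_pcie_get_trans_pcie() can recover the transport:

static int example_request_msix(struct iwl_trans_pcie *trans_pcie,
				struct pci_dev *pdev, int nvec)
{
	int i, ret;

	for (i = 0; i < nvec; i++)
		trans_pcie->msix_entries[i].entry = i;

	ret = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
				    nvec, nvec);
	if (ret < 0)
		return ret;

	for (i = 0; i < nvec; i++) {
		/* Default vector handles FH/HW causes; the rest handle RX. */
		irq_handler_t thread_fn =
			(i == trans_pcie->default_irq_num) ?
			iwl_pcie_irq_msix_handler :
			iwl_pcie_irq_rx_msix_handler;

		ret = request_threaded_irq(trans_pcie->msix_entries[i].vector,
					   iwl_pcie_msix_isr, thread_fn,
					   IRQF_SHARED, "iwlwifi",
					   &trans_pcie->msix_entries[i]);
		if (ret)
			return ret;	/* real code would unwind here */
	}

	return 0;
}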
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
struct msix_entry *entry = dev_id;
struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
struct iwl_trans *trans = trans_pcie->trans;
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta_fh, inta_hw;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
spin_lock(&trans_pcie->irq_lock);
inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
/*
* Clear the cause registers to avoid handling the same cause twice.
*/
iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
spin_unlock(&trans_pcie->irq_lock);
if (unlikely(!(inta_fh | inta_hw))) {
IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
lock_map_release(&trans->sync_cmd_lockdep_map);
return IRQ_NONE;
}
if (iwl_have_debug_level(IWL_DL_ISR))
IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
inta_fh,
iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
/* This "Tx" DMA channel is used only for loading uCode */
if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
isr_stats->tx++;
/*
* Wake up uCode load routine,
* now that load is complete
*/
trans_pcie->ucode_write_complete = true;
wake_up(&trans_pcie->ucode_write_waitq);
}
/* Error detected by uCode */
if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
(inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
IWL_ERR(trans,
"Microcode SW error detected. Restarting 0x%X.\n",
inta_fh);
isr_stats->sw++;
iwl_pcie_irq_handle_error(trans);
}
/* After checking FH register check HW register */
if (iwl_have_debug_level(IWL_DL_ISR))
IWL_DEBUG_ISR(trans,
"ISR inta_hw 0x%08x, enabled 0x%08x\n",
inta_hw,
iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
/* Alive notification via Rx interrupt will do the real work */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
}
/* uCode wakes up after power-down sleep */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
iwl_pcie_rxq_check_wrptr(trans);
iwl_pcie_txq_check_wrptrs(trans);
isr_stats->wakeup++;
}
/* Chip got too hot and stopped itself */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
IWL_ERR(trans, "Microcode CT kill error detected.\n");
isr_stats->ctkill++;
}
/* HW RF KILL switch toggled */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
bool hw_rfkill;
hw_rfkill = iwl_is_rfkill_set(trans);
IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
hw_rfkill ? "disable radio" : "enable radio");
isr_stats->rfkill++;
mutex_lock(&trans_pcie->mutex);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
set_bit(STATUS_RFKILL, &trans->status);
if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
&trans->status))
IWL_DEBUG_RF_KILL(trans,
"Rfkill while SYNC HCMD in flight\n");
wake_up(&trans_pcie->wait_command_queue);
} else {
clear_bit(STATUS_RFKILL, &trans->status);
}
}
if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
IWL_ERR(trans,
"Hardware error detected. Restarting.\n");
isr_stats->hw++;
iwl_pcie_irq_handle_error(trans);
}
iwl_pcie_clear_irq(trans, entry);
lock_map_release(&trans->sync_cmd_lockdep_map);
return IRQ_HANDLED;
}
...@@ -1062,10 +1062,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
test_bit(txq_id, trans_pcie->queue_stopped)) {
struct sk_buff_head overflow_skbs;
__skb_queue_head_init(&overflow_skbs);
skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
/*
* This is tricky: we are in reclaim path which is non
...@@ -1076,8 +1076,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
*/
spin_unlock_bh(&txq->lock);
while (!skb_queue_empty(&overflow_skbs)) {
struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1;
struct iwl_device_cmd *dev_cmd =
......
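The reclaim hunk above is truncated by the viewer, but the pattern it applies is visible: splice the overflow queue into a local list while holding the queue lock, drop the lock, then drain the local list without it. An isolated sketch; process_skb() and the locking context are hypothetical, while the sk_buff APIs are the ones used above:

static void drain_overflow(struct iwl_txq *txq)
{
	struct sk_buff_head overflow_skbs;

	__skb_queue_head_init(&overflow_skbs);

	spin_lock_bh(&txq->lock);
	/* Empties txq->overflow_q into our local, lock-free list. */
	skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
	spin_unlock_bh(&txq->lock);

	while (!skb_queue_empty(&overflow_skbs)) {
		struct sk_buff *skb = __skb_dequeue(&overflow_skbs);

		process_skb(skb);	/* hypothetical consumer */
	}
}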