Commit c2da9133 authored by Kalle Valo


Merge tag 'iwlwifi-next-for-kalle-2016-08-30-2' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

* Preparation for new HW continues;
* Some DQA improvements;
* Support for GMAC;
parents 60747ef4 76f8c0e1
@@ -72,15 +72,15 @@
#define IWL9000_SMEM_OFFSET 0x400000 #define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000 #define IWL9000_SMEM_LEN 0x68000
#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-" #define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
#define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-" #define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9260LC_FW_PRE "iwlwifi-9260-th-a0-lc-a0-" #define IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
#define IWL9000_MODULE_FIRMWARE(api) \ #define IWL9000_MODULE_FIRMWARE(api) \
IWL9000_FW_PRE "-" __stringify(api) ".ucode" IWL9000_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260_MODULE_FIRMWARE(api) \ #define IWL9260_MODULE_FIRMWARE(api) \
IWL9260_FW_PRE "-" __stringify(api) ".ucode" IWL9260_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260LC_MODULE_FIRMWARE(api) \ #define IWL9000LC_MODULE_FIRMWARE(api) \
IWL9260LC_FW_PRE "-" __stringify(api) ".ucode" IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_9000 10 #define NVM_HW_SECTION_NUM_FAMILY_9000 10
@@ -146,41 +146,62 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.mac_addr_from_csr = true, \ .mac_addr_from_csr = true, \
.rf_id = true .rf_id = true
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl9260_2ac_cfg = { const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260", .name = "Intel(R) Dual Band Wireless AC 9260",
.fw_name_pre = IWL9260_FW_PRE, .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000, IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params, .ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION, .nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION, .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl9460_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9460",
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
}; };
/* /*
* TODO: the struct below is for internal testing only; it should be
* removed by the end of 2016
*/ */
const struct iwl_cfg iwl9260lc_2ac_cfg = { const struct iwl_cfg iwl9000lc_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260", .name = "Intel(R) Dual Band Wireless AC 9000",
.fw_name_pre = IWL9260LC_FW_PRE, .fw_name_pre = IWL9000LC_FW_PRE,
IWL_DEVICE_9000, IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params, .ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION, .nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION, .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
}; .integrated = true,
const struct iwl_cfg iwl5165_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 5165",
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
}; };
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
@@ -449,9 +449,11 @@ extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg; extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg; extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
extern const struct iwl_cfg iwl9000lc_2ac_cfg;
extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9260lc_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl5165_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg; extern const struct iwl_cfg iwla000_2ac_cfg;
#endif /* CONFIG_IWLMVM */ #endif /* CONFIG_IWLMVM */
......
@@ -77,7 +77,6 @@
*/ */
#define FH_MEM_LOWER_BOUND (0x1000) #define FH_MEM_LOWER_BOUND (0x1000)
#define FH_MEM_UPPER_BOUND (0x2000) #define FH_MEM_UPPER_BOUND (0x2000)
#define TFH_MEM_LOWER_BOUND (0xA06000)
/** /**
* Keep-Warm (KW) buffer base address. * Keep-Warm (KW) buffer base address.
@@ -120,7 +119,7 @@
#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20) #define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80) #define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
/* a000 TFD table address, 64 bit */ /* a000 TFD table address, 64 bit */
#define TFH_TFDQ_CBB_TABLE (TFH_MEM_LOWER_BOUND + 0x1C00) #define TFH_TFDQ_CBB_TABLE (0x1C00)
/* Find TFD CB base pointer for given queue */ /* Find TFD CB base pointer for given queue */
static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
@@ -156,7 +155,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* In case of DRAM read address which is not aligned to 128B, the TFH will * In case of DRAM read address which is not aligned to 128B, the TFH will
* enable transfer size which doesn't cross 64B DRAM address boundary. * enable transfer size which doesn't cross 64B DRAM address boundary.
*/ */
#define TFH_TRANSFER_MODE (TFH_MEM_LOWER_BOUND + 0x1F40) #define TFH_TRANSFER_MODE (0x1F40)
#define TFH_TRANSFER_MAX_PENDING_REQ 0xc #define TFH_TRANSFER_MAX_PENDING_REQ 0xc
#define TFH_CHUNK_SIZE_128 BIT(8) #define TFH_CHUNK_SIZE_128 BIT(8)
#define TFH_CHUNK_SPLIT_MODE BIT(10) #define TFH_CHUNK_SPLIT_MODE BIT(10)
@@ -167,7 +166,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* the start of the TFD first TB. * the start of the TFD first TB.
* In case of a DRAM Tx CMD update the TFH will update PN and Key ID * In case of a DRAM Tx CMD update the TFH will update PN and Key ID
*/ */
#define TFH_TXCMD_UPDATE_CFG (TFH_MEM_LOWER_BOUND + 0x1F48) #define TFH_TXCMD_UPDATE_CFG (0x1F48)
/* /*
* Controls TX DMA operation * Controls TX DMA operation
* *
@@ -181,22 +180,22 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* set to 1 - interrupt is sent to the driver * set to 1 - interrupt is sent to the driver
* Bit 0: Indicates the snoop configuration * Bit 0: Indicates the snoop configuration
*/ */
#define TFH_SRV_DMA_CHNL0_CTRL (TFH_MEM_LOWER_BOUND + 0x1F60) #define TFH_SRV_DMA_CHNL0_CTRL (0x1F60)
#define TFH_SRV_DMA_SNOOP BIT(0) #define TFH_SRV_DMA_SNOOP BIT(0)
#define TFH_SRV_DMA_TO_DRIVER BIT(24) #define TFH_SRV_DMA_TO_DRIVER BIT(24)
#define TFH_SRV_DMA_START BIT(31) #define TFH_SRV_DMA_START BIT(31)
/* Defines the DMA SRAM write start address to transfer a data block */ /* Defines the DMA SRAM write start address to transfer a data block */
#define TFH_SRV_DMA_CHNL0_SRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F64) #define TFH_SRV_DMA_CHNL0_SRAM_ADDR (0x1F64)
/* Defines the 64bits DRAM start address to read the DMA data block from */ /* Defines the 64bits DRAM start address to read the DMA data block from */
#define TFH_SRV_DMA_CHNL0_DRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F68) #define TFH_SRV_DMA_CHNL0_DRAM_ADDR (0x1F68)
/* /*
* Defines the number of bytes to transfer from DRAM to SRAM. * Defines the number of bytes to transfer from DRAM to SRAM.
* Note that this register may be configured with non-dword aligned size. * Note that this register may be configured with non-dword aligned size.
*/ */
#define TFH_SRV_DMA_CHNL0_BC (TFH_MEM_LOWER_BOUND + 0x1F70) #define TFH_SRV_DMA_CHNL0_BC (0x1F70)
/** /**
* Rx SRAM Control and Status Registers (RSCSR) * Rx SRAM Control and Status Registers (RSCSR)
......
@@ -302,22 +302,17 @@
#define OSC_CLK_FORCE_CONTROL (0x8) #define OSC_CLK_FORCE_CONTROL (0x8)
#define FH_UCODE_LOAD_STATUS (0x1AF0) #define FH_UCODE_LOAD_STATUS (0x1AF0)
#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
enum secure_load_status_reg {
LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003,
LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007,
LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
};
#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38) /*
#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C) * Replacing FH_UCODE_LOAD_STATUS
* This register is written by the driver and is read by the uCode during boot flow.
* Note this address is cleared after MAC reset.
*/
#define UREG_UCODE_LOAD_STATUS (0xa05c40)
#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78) #define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C) #define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000) #define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400) #define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
......
@@ -432,26 +432,43 @@ struct iwl_mvm_rm_sta_cmd {
u8 reserved[3]; u8 reserved[3];
} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */ } __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
/**
* struct iwl_mvm_mgmt_mcast_key_cmd_v1
* ( MGMT_MCAST_KEY = 0x1f )
* @ctrl_flags: %iwl_sta_key_flag
* @igtk:
* @k1: unused
* @k2: unused
* @sta_id: station ID that supports IGTK
* @key_id:
* @receive_seq_cnt: initial RSC/PN needed for replay check
*/
struct iwl_mvm_mgmt_mcast_key_cmd_v1 {
__le32 ctrl_flags;
u8 igtk[16];
u8 k1[16];
u8 k2[16];
__le32 key_id;
__le32 sta_id;
__le64 receive_seq_cnt;
} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
/** /**
* struct iwl_mvm_mgmt_mcast_key_cmd * struct iwl_mvm_mgmt_mcast_key_cmd
* ( MGMT_MCAST_KEY = 0x1f ) * ( MGMT_MCAST_KEY = 0x1f )
* @ctrl_flags: %iwl_sta_key_flag * @ctrl_flags: %iwl_sta_key_flag
* @IGTK: * @igtk: IGTK master key
* @K1: unused
* @K2: unused
* @sta_id: station ID that supports IGTK
* @key_id: * @key_id:
* @receive_seq_cnt: initial RSC/PN needed for replay check * @receive_seq_cnt: initial RSC/PN needed for replay check
*/ */
struct iwl_mvm_mgmt_mcast_key_cmd { struct iwl_mvm_mgmt_mcast_key_cmd {
__le32 ctrl_flags; __le32 ctrl_flags;
u8 IGTK[16]; u8 igtk[32];
u8 K1[16];
u8 K2[16];
__le32 key_id; __le32 key_id;
__le32 sta_id; __le32 sta_id;
__le64 receive_seq_cnt; __le64 receive_seq_cnt;
} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */ } __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */
struct iwl_mvm_wep_key { struct iwl_mvm_wep_key {
u8 key_index; u8 key_index;
......
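For reference, a minimal sketch of how a driver-side helper might pick between the two MGMT_MCAST_KEY layouts above (the helper name is hypothetical; the structures, iwl_mvm_has_new_rx_api() and iwl_mvm_send_cmd_pdu() are the ones used elsewhere in this commit):

/*
 * Illustrative sketch only: send the IGTK command in the layout the
 * firmware understands. v1 carries a 16-byte IGTK; v2 widens the field
 * to 32 bytes so BIP-GMAC-256 keys fit.
 */
static int example_send_mcast_key(struct iwl_mvm *mvm,
				  struct iwl_mvm_mgmt_mcast_key_cmd *cmd)
{
	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 v1 = {
			.ctrl_flags = cmd->ctrl_flags,
			.key_id = cmd->key_id,
			.sta_id = cmd->sta_id,
			.receive_seq_cnt = cmd->receive_seq_cnt,
		};

		/* old firmware only knows the 16-byte field */
		memcpy(v1.igtk, cmd->igtk, sizeof(v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(v1), &v1);
	}

	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(*cmd), cmd);
}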
@@ -675,13 +675,21 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
tx_resp->frame_count) & 0xfff; tx_resp->frame_count) & 0xfff;
} }
/* Available options for the SCD_QUEUE_CFG HCMD */
enum iwl_scd_cfg_actions {
SCD_CFG_DISABLE_QUEUE = 0x0,
SCD_CFG_ENABLE_QUEUE = 0x1,
SCD_CFG_UPDATE_QUEUE_TID = 0x2,
};
/** /**
* struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
* @token: * @token:
* @sta_id: station id * @sta_id: station id
* @tid: * @tid:
* @scd_queue: scheduler queue to config
* @enable: 1 queue enable, 0 queue disable * @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner
* Value is one of %iwl_scd_cfg_actions options
* @aggregate: 1 aggregated queue, 0 otherwise * @aggregate: 1 aggregated queue, 0 otherwise
* @tx_fifo: %enum iwl_mvm_tx_fifo * @tx_fifo: %enum iwl_mvm_tx_fifo
* @window: BA window size * @window: BA window size
@@ -692,7 +700,7 @@ struct iwl_scd_txq_cfg_cmd {
u8 sta_id; u8 sta_id;
u8 tid; u8 tid;
u8 scd_queue; u8 scd_queue;
u8 enable; u8 action;
u8 aggregate; u8 aggregate;
u8 tx_fifo; u8 tx_fifo;
u8 window; u8 window;
......
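A hedged sketch of how the new action field replaces the old boolean enable, for example when handing an already enabled queue over to a different owning TID (helper name hypothetical; the struct, enum iwl_scd_cfg_actions and iwl_mvm_send_cmd_pdu() are as defined in this commit):

/*
 * Illustrative sketch only: reassign the TID that "owns" an enabled TX
 * queue, without disabling it, using SCD_CFG_UPDATE_QUEUE_TID.
 */
static int example_update_txq_owner(struct iwl_mvm *mvm, u8 queue,
				    u8 sta_id, u8 tid, u8 fifo)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
		.sta_id = sta_id,
		.tid = tid,
		.tx_fifo = fifo,
	};

	return iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
}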
@@ -482,13 +482,17 @@ struct iwl_nvm_access_cmd {
* @block_size: the block size in powers of 2 * @block_size: the block size in powers of 2
* @block_num: number of blocks specified in the command. * @block_num: number of blocks specified in the command.
* @device_phy_addr: virtual addresses from device side * @device_phy_addr: virtual addresses from device side
* 32 bit address for API version 1, 64 bit address for API version 2.
*/ */
struct iwl_fw_paging_cmd { struct iwl_fw_paging_cmd {
__le32 flags; __le32 flags;
__le32 block_size; __le32 block_size;
__le32 block_num; __le32 block_num;
__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; union {
} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ __le32 addr32[NUM_OF_FW_PAGING_BLOCKS];
__le64 addr64[NUM_OF_FW_PAGING_BLOCKS];
} device_phy_addr;
} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_2 */
/* /*
* Fw items ID's * Fw items ID's
......
@@ -385,9 +385,7 @@ static int iwl_save_fw_paging(struct iwl_mvm *mvm,
/* send paging cmd to FW in case CPU2 has paging image */ /* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw) static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{ {
int blk_idx; struct iwl_fw_paging_cmd paging_cmd = {
__le32 dev_phy_addr;
struct iwl_fw_paging_cmd fw_paging_cmd = {
.flags = .flags =
cpu_to_le32(PAGING_CMD_IS_SECURED | cpu_to_le32(PAGING_CMD_IS_SECURED |
PAGING_CMD_IS_ENABLED | PAGING_CMD_IS_ENABLED |
@@ -396,18 +394,32 @@ static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
.block_num = cpu_to_le32(mvm->num_of_paging_blk), .block_num = cpu_to_le32(mvm->num_of_paging_blk),
}; };
int blk_idx, size = sizeof(paging_cmd);
/* A bit hard coded - but this is the old API and will be deprecated */
if (!iwl_mvm_has_new_tx_api(mvm))
size -= NUM_OF_FW_PAGING_BLOCKS * 4;
/* loop over all paging blocks + CSS block */
for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
dev_phy_addr = dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
PAGE_2_EXP_SIZE); addr = addr >> PAGE_2_EXP_SIZE;
fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
if (iwl_mvm_has_new_tx_api(mvm)) {
__le64 phy_addr = cpu_to_le64(addr);
paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
} else {
__le32 phy_addr = cpu_to_le32(addr);
paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
}
} }
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD, return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
IWL_ALWAYS_LONG_GROUP, 0), IWL_ALWAYS_LONG_GROUP, 0),
0, sizeof(fw_paging_cmd), &fw_paging_cmd); 0, size, &paging_cmd);
} }
/* /*
......
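The size trimming in iwl_send_paging_cmd() is the whole backward-compatibility story: v1 and v2 of FW_PAGING_BLOCK_CMD differ only in the width of the per-block address, so for old firmware the same struct is sent with the tail shortened by 4 bytes per block. A hedged sketch of that computation (helper name hypothetical; the constants and iwl_mvm_has_new_tx_api() come from this commit):

/*
 * Illustrative sketch only: payload length of the paging command for
 * either API version. The union makes sizeof() reflect the 64-bit
 * layout, so the 32-bit (v1) payload is 4 bytes per block smaller.
 */
static u16 example_paging_cmd_size(struct iwl_mvm *mvm)
{
	u16 size = sizeof(struct iwl_fw_paging_cmd);

	if (!iwl_mvm_has_new_tx_api(mvm))
		size -= NUM_OF_FW_PAGING_BLOCKS * 4;

	return size;
}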
@@ -465,7 +465,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4); BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
hw->wiphy->cipher_suites = mvm->ciphers; hw->wiphy->cipher_suites = mvm->ciphers;
@@ -490,6 +490,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->ciphers[hw->wiphy->n_cipher_suites] = mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_AES_CMAC; WLAN_CIPHER_SUITE_AES_CMAC;
hw->wiphy->n_cipher_suites++; hw->wiphy->n_cipher_suites++;
if (iwl_mvm_has_new_rx_api(mvm)) {
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_BIP_GMAC_128;
hw->wiphy->n_cipher_suites++;
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_BIP_GMAC_256;
hw->wiphy->n_cipher_suites++;
}
} }
/* currently FW API supports only one optional cipher scheme */ /* currently FW API supports only one optional cipher scheme */
@@ -2746,6 +2754,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
break; break;
case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
break; break;
case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP40:
@@ -2779,9 +2789,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
* GTK on AP interface is a TX-only key, return 0; * GTK on AP interface is a TX-only key, return 0;
* on IBSS they're per-station and because we're lazy * on IBSS they're per-station and because we're lazy
* we don't support them for RX, so do the same. * we don't support them for RX, so do the same.
* CMAC in AP/IBSS modes must be done in software. * CMAC/GMAC in AP/IBSS modes must be done in software.
*/ */
if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
else else
ret = 0; ret = 0;
......
@@ -697,6 +697,10 @@ struct iwl_mvm_baid_data {
* it. In this state, when a new queue is needed to be allocated but no * it. In this state, when a new queue is needed to be allocated but no
* such free queue exists, an inactive queue might be freed and given to * such free queue exists, an inactive queue might be freed and given to
* the new RA/TID. * the new RA/TID.
* @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
* This is the state of a queue that has had traffic pass through it, but
* needs to be reconfigured for some reason, e.g. the queue needs to
* become unshared and have aggregations re-enabled on it.
*/ */
enum iwl_mvm_queue_status { enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_FREE, IWL_MVM_QUEUE_FREE,
@@ -704,10 +708,11 @@ enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_READY, IWL_MVM_QUEUE_READY,
IWL_MVM_QUEUE_SHARED, IWL_MVM_QUEUE_SHARED,
IWL_MVM_QUEUE_INACTIVE, IWL_MVM_QUEUE_INACTIVE,
IWL_MVM_QUEUE_RECONFIGURING,
}; };
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
#define IWL_MVM_NUM_CIPHERS 8 #define IWL_MVM_NUM_CIPHERS 10
struct iwl_mvm { struct iwl_mvm {
/* for logger access */ /* for logger access */
@@ -767,6 +772,7 @@ struct iwl_mvm {
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */ bool reserved; /* Is this the TXQ reserved for a STA */
u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */ u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
u8 txq_tid; /* The TID "owner" of this queue*/
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
/* Timestamp for inactivation per TID of this queue */ /* Timestamp for inactivation per TID of this queue */
unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1]; unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
@@ -1122,6 +1128,18 @@ static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
(mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3); (mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3);
} }
static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
{
return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
(queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
}
static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
{
return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
(queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
}
static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
{ {
bool nvm_lar = mvm->nvm_data->lar_enabled; bool nvm_lar = mvm->nvm_data->lar_enabled;
@@ -1192,6 +1210,12 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
} }
static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
{
/* TODO - replace with TLV once defined */
return mvm->trans->cfg->use_tfh;
}
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
{ {
#ifdef CONFIG_THERMAL #ifdef CONFIG_THERMAL
......
@@ -132,7 +132,8 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
IEEE80211_CCMP_PN_LEN) <= 0) IEEE80211_CCMP_PN_LEN) <= 0)
return -1; return -1;
memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); if (!(stats->flag & RX_FLAG_AMSDU_MORE))
memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
stats->flag |= RX_FLAG_PN_VALIDATED; stats->flag |= RX_FLAG_PN_VALIDATED;
return 0; return 0;
@@ -883,6 +884,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
u8 *qc = ieee80211_get_qos_ctl(hdr); u8 *qc = ieee80211_get_qos_ctl(hdr);
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
if (!(desc->amsdu_info &
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
rx_status->flag |= RX_FLAG_AMSDU_MORE;
} }
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
iwl_mvm_agg_rx_received(mvm, baid); iwl_mvm_agg_rx_received(mvm, baid);
......
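The two RX changes above belong together: the multi-queue RX hardware delivers A-MSDU subframes as separate MPDUs that reuse the same PN, so the stored replay counter may only advance once, when the last subframe (the one without RX_FLAG_AMSDU_MORE) passes the check. A hedged sketch of that rule in isolation (helper name hypothetical; the flag and IEEE80211_CCMP_PN_LEN are as used above):

/*
 * Illustrative sketch only: PN replay check that tolerates repeated PNs
 * within one A-MSDU by updating the stored PN only on the last subframe.
 */
static bool example_pn_ok(u8 *stored_pn, const u8 *rx_pn, bool more_subframes)
{
	if (memcmp(rx_pn, stored_pn, IEEE80211_CCMP_PN_LEN) <= 0)
		return false;			/* replay or reordering */

	if (!more_subframes)			/* last (or only) subframe */
		memcpy(stored_pn, rx_pn, IEEE80211_CCMP_PN_LEN);

	return true;
}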
@@ -468,6 +468,11 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
continue; continue;
/* Don't try and take queues being reconfigured */
if (mvm->queue_info[queue].status ==
IWL_MVM_QUEUE_RECONFIGURING)
continue;
ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
} }
@@ -501,31 +506,37 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
queue = ac_to_queue[IEEE80211_AC_VO]; queue = ac_to_queue[IEEE80211_AC_VO];
/* Make sure queue found (or not) is legal */ /* Make sure queue found (or not) is legal */
if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE && if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) || !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
(queue >= IWL_MVM_DQA_MIN_DATA_QUEUE && (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
(queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
IWL_ERR(mvm, "No DATA queues available to share\n"); IWL_ERR(mvm, "No DATA queues available to share\n");
queue = -ENOSPC; return -ENOSPC;
}
/* Make sure the queue isn't in the middle of being reconfigured */
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
IWL_ERR(mvm,
"TXQ %d is in the middle of re-config - try again\n",
queue);
return -EBUSY;
} }
return queue; return queue;
} }
/* /*
* If a given queue has a higher AC than the TID stream that is being added to * If a given queue has a higher AC than the TID stream that is being compared
* it, the queue needs to be redirected to the lower AC. This function does that * to, the queue needs to be redirected to the lower AC. This function does that
* in such a case, otherwise - if no redirection required - it does nothing, * in such a case, otherwise - if no redirection required - it does nothing,
* unless the %force param is true. * unless the %force param is true.
*/ */
static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
int ac, int ssn, unsigned int wdg_timeout, int ac, int ssn, unsigned int wdg_timeout,
bool force) bool force)
{ {
struct iwl_scd_txq_cfg_cmd cmd = { struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue, .scd_queue = queue,
.enable = 0, .action = SCD_CFG_DISABLE_QUEUE,
}; };
bool shared_queue; bool shared_queue;
unsigned long mq; unsigned long mq;
@@ -551,11 +562,12 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
cmd.tid = mvm->queue_info[queue].txq_tid;
mq = mvm->queue_info[queue].hw_queue_to_mac80211; mq = mvm->queue_info[queue].hw_queue_to_mac80211;
shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1); shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
spin_unlock_bh(&mvm->queue_info_lock); spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n", IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
queue, iwl_mvm_ac_to_tx_fifo[ac]); queue, iwl_mvm_ac_to_tx_fifo[ac]);
/* Stop MAC queues and wait for this queue to empty */ /* Stop MAC queues and wait for this queue to empty */
@@ -580,6 +592,11 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF, cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
ssn, wdg_timeout); ssn, wdg_timeout);
/* Update the TID "owner" of the queue */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].txq_tid = tid;
spin_unlock_bh(&mvm->queue_info_lock);
/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
/* Redirect to lower AC */ /* Redirect to lower AC */
@@ -709,7 +726,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (WARN_ON(queue <= 0)) { if (WARN_ON(queue <= 0)) {
IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
tid, cfg.sta_id); tid, cfg.sta_id);
return -ENOSPC; return queue;
} }
/* /*
@@ -728,7 +745,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (using_inactive_queue) { if (using_inactive_queue) {
struct iwl_scd_txq_cfg_cmd cmd = { struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue, .scd_queue = queue,
.enable = 0, .action = SCD_CFG_DISABLE_QUEUE,
}; };
u8 ac; u8 ac;
@@ -738,11 +755,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
ac = mvm->queue_info[queue].mac80211_ac; ac = mvm->queue_info[queue].mac80211_ac;
cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac]; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
cmd.tid = mvm->queue_info[queue].txq_tid;
spin_unlock_bh(&mvm->queue_info_lock); spin_unlock_bh(&mvm->queue_info_lock);
/* Disable the queue */ /* Disable the queue */
iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, if (disable_agg_tids)
true); iwl_mvm_invalidate_sta_queue(mvm, queue,
disable_agg_tids, false);
iwl_trans_txq_disable(mvm->trans, queue, false); iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
&cmd); &cmd);
@@ -758,6 +777,10 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
return ret; return ret;
} }
/* If TXQ is allocated to another STA, update removal in FW */
if (cmd.sta_id != mvmsta->sta_id)
iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
} }
IWL_DEBUG_TX_QUEUES(mvm, IWL_DEBUG_TX_QUEUES(mvm,
@@ -827,6 +850,119 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
return ret; return ret;
} }
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_UPDATE_QUEUE_TID,
};
s8 sta_id;
int tid;
unsigned long tid_bitmap;
int ret;
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
return;
/* Find any TID for queue */
tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
cmd.tid = tid;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
queue, ret);
else
IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
queue, tid);
}
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
s8 sta_id;
int tid = -1;
unsigned long tid_bitmap;
unsigned int wdg_timeout;
int ssn;
int ret = true;
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
/* Find TID for queue, and make sure it is the only one on the queue */
tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
if (tid_bitmap != BIT(tid)) {
IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
queue, tid_bitmap);
return;
}
IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
tid);
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
return;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
tid_to_mac80211_ac[tid], ssn,
wdg_timeout, true);
if (ret) {
IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
return;
}
/* If aggs should be turned back on - do it */
if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
struct iwl_mvm_add_sta_cmd cmd;
mvmsta->tid_disable_agg &= ~BIT(tid);
cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
cmd.sta_id = mvmsta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
iwl_mvm_add_sta_cmd_size(mvm), &cmd);
if (!ret) {
IWL_DEBUG_TX_QUEUES(mvm,
"TXQ #%d is now aggregated again\n",
queue);
/* Mark queue internally as aggregating again */
iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
}
}
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
}
static inline u8 iwl_mvm_tid_to_ac_queue(int tid) static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{ {
if (tid == IWL_MAX_TID_COUNT) if (tid == IWL_MAX_TID_COUNT)
@@ -894,13 +1030,42 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
struct ieee80211_sta *sta; struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta; struct iwl_mvm_sta *mvmsta;
unsigned long deferred_tid_traffic; unsigned long deferred_tid_traffic;
int sta_id, tid; int queue, sta_id, tid;
/* Check inactivity of queues */ /* Check inactivity of queues */
iwl_mvm_inactivity_check(mvm); iwl_mvm_inactivity_check(mvm);
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
/* Reconfigure queues requiring reconfiguration */
for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
bool reconfig;
bool change_owner;
spin_lock_bh(&mvm->queue_info_lock);
reconfig = (mvm->queue_info[queue].status ==
IWL_MVM_QUEUE_RECONFIGURING);
/*
* We need to take into account a situation in which a TXQ was
* allocated to TID x, and then turned shared by adding TIDs y
* and z. If TID x becomes inactive and is removed from the TXQ,
* ownership must be given to one of the remaining TIDs.
* This is mainly because if TID x continues - a new queue can't
* be allocated for it as long as it is an owner of another TXQ.
*/
change_owner = !(mvm->queue_info[queue].tid_bitmap &
BIT(mvm->queue_info[queue].txq_tid)) &&
(mvm->queue_info[queue].status ==
IWL_MVM_QUEUE_SHARED);
spin_unlock_bh(&mvm->queue_info_lock);
if (reconfig)
iwl_mvm_unshare_queue(mvm, queue);
else if (change_owner)
iwl_mvm_change_queue_owner(mvm, queue);
}
/* Go over all stations with deferred traffic */ /* Go over all stations with deferred traffic */
for_each_set_bit(sta_id, mvm->sta_deferred_frames, for_each_set_bit(sta_id, mvm->sta_deferred_frames,
IWL_MVM_STATION_COUNT) { IWL_MVM_STATION_COUNT) {
@@ -963,6 +1128,61 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
return 0; return 0;
} }
/*
* In DQA mode, after a HW restart the queues should be allocated as before, in
* order to avoid race conditions when there are shared queues. This function
* does the re-mapping and queue allocation.
*
* Note that re-enabling aggregations isn't done in this function.
*/
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta)
{
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
int i;
struct iwl_trans_txq_scd_cfg cfg = {
.sta_id = mvm_sta->sta_id,
.frame_limit = IWL_FRAME_LIMIT,
};
/* Make sure reserved queue is still marked as such (or allocated) */
mvm->queue_info[mvm_sta->reserved_queue].status =
IWL_MVM_QUEUE_RESERVED;
for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
int txq_id = tid_data->txq_id;
int ac;
u8 mac_queue;
if (txq_id == IEEE80211_INVAL_HW_QUEUE)
continue;
skb_queue_head_init(&tid_data->deferred_tx_frames);
ac = tid_to_mac80211_ac[i];
mac_queue = mvm_sta->vif->hw_queue[ac];
cfg.tid = i;
cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
IWL_DEBUG_TX_QUEUES(mvm,
"Re-mapping sta %d tid %d to queue %d\n",
mvm_sta->sta_id, i, txq_id);
iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
IEEE80211_SEQ_TO_SN(tid_data->seq_number),
&cfg, wdg_timeout);
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
}
atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm, int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct ieee80211_sta *sta) struct ieee80211_sta *sta)
@@ -985,6 +1205,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
spin_lock_init(&mvm_sta->lock); spin_lock_init(&mvm_sta->lock);
/* In DQA mode, if this is a HW restart, re-alloc existing queues */
if (iwl_mvm_is_dqa_supported(mvm) &&
test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
goto update_fw;
}
mvm_sta->sta_id = sta_id; mvm_sta->sta_id = sta_id;
mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color); mvmvif->color);
@@ -1048,6 +1275,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
goto err; goto err;
} }
update_fw:
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0); ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
if (ret) if (ret)
goto err; goto err;
@@ -1956,7 +2184,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO; return -EIO;
} }
spin_lock_bh(&mvm->queue_info_lock); spin_lock(&mvm->queue_info_lock);
/* /*
* Note the possible cases: * Note the possible cases:
@@ -1967,14 +2195,20 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* non-DQA mode, since the TXQ hasn't yet been allocated * non-DQA mode, since the TXQ hasn't yet been allocated
*/ */
txq_id = mvmsta->tid_data[tid].txq_id; txq_id = mvmsta->tid_data[tid].txq_id;
if (!iwl_mvm_is_dqa_supported(mvm) || if (iwl_mvm_is_dqa_supported(mvm) &&
unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
ret = -ENXIO;
IWL_DEBUG_TX_QUEUES(mvm,
"Can't start tid %d agg on shared queue!\n",
tid);
goto release_locks;
} else if (!iwl_mvm_is_dqa_supported(mvm) ||
mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
mvm->first_agg_queue, mvm->first_agg_queue,
mvm->last_agg_queue); mvm->last_agg_queue);
if (txq_id < 0) { if (txq_id < 0) {
ret = txq_id; ret = txq_id;
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Failed to allocate agg queue\n"); IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto release_locks; goto release_locks;
} }
@@ -1982,7 +2216,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* TXQ hasn't yet been enabled, so mark it only as reserved */ /* TXQ hasn't yet been enabled, so mark it only as reserved */
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
} }
spin_unlock_bh(&mvm->queue_info_lock);
spin_unlock(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, IWL_DEBUG_TX_QUEUES(mvm,
"AGG for tid %d will be on queue #%d\n", "AGG for tid %d will be on queue #%d\n",
...@@ -2006,8 +2241,11 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -2006,8 +2241,11 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
} }
ret = 0; ret = 0;
goto out;
release_locks: release_locks:
spin_unlock(&mvm->queue_info_lock);
out:
spin_unlock_bh(&mvmsta->lock); spin_unlock_bh(&mvmsta->lock);
return ret; return ret;
@@ -2023,6 +2261,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
int queue, ret; int queue, ret;
bool alloc_queue = true; bool alloc_queue = true;
enum iwl_mvm_queue_status queue_status;
u16 ssn; u16 ssn;
struct iwl_trans_txq_scd_cfg cfg = { struct iwl_trans_txq_scd_cfg cfg = {
@@ -2048,13 +2287,15 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
spin_lock_bh(&mvm->queue_info_lock);
queue_status = mvm->queue_info[queue].status;
spin_unlock_bh(&mvm->queue_info_lock);
/* In DQA mode, the existing queue might need to be reconfigured */ /* In DQA mode, the existing queue might need to be reconfigured */
if (iwl_mvm_is_dqa_supported(mvm)) { if (iwl_mvm_is_dqa_supported(mvm)) {
spin_lock_bh(&mvm->queue_info_lock);
/* Maybe there is no need to even alloc a queue... */ /* Maybe there is no need to even alloc a queue... */
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
alloc_queue = false; alloc_queue = false;
spin_unlock_bh(&mvm->queue_info_lock);
/* /*
* Only reconfig the SCD for the queue if the window size has * Only reconfig the SCD for the queue if the window size has
@@ -2089,9 +2330,12 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
vif->hw_queue[tid_to_mac80211_ac[tid]], ssn, vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
&cfg, wdg_timeout); &cfg, wdg_timeout);
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); /* Send ADD_STA command to enable aggs only if the queue isn't shared */
if (ret) if (queue_status != IWL_MVM_QUEUE_SHARED) {
return -EIO; ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
return -EIO;
}
/* No need to mark as reserved */ /* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock); spin_lock_bh(&mvm->queue_info_lock);
@@ -2123,7 +2367,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u16 txq_id; u16 txq_id;
int err; int err;
/* /*
* If mac80211 is cleaning its state, then say that we finished since * If mac80211 is cleaning its state, then say that we finished since
* our state has been cleared anyway. * our state has been cleared anyway.
@@ -2152,6 +2395,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*/ */
if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
spin_unlock_bh(&mvm->queue_info_lock); spin_unlock_bh(&mvm->queue_info_lock);
switch (tid_data->state) { switch (tid_data->state) {
@@ -2412,9 +2656,15 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
/* verify the key details match the required command's expectations */ /* verify the key details match the required command's expectations */
if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) || if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
(keyconf->keyidx != 4 && keyconf->keyidx != 5))) (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
return -EINVAL;
if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
return -EINVAL; return -EINVAL;
igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
@@ -2430,11 +2680,18 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_AES_CMAC:
igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM); igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
break; break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
break;
default: default:
return -EINVAL; return -EINVAL;
} }
memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen); memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
igtk_cmd.ctrl_flags |=
cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_key_rx_seq(keyconf, 0, &seq);
pn = seq.aes_cmac.pn; pn = seq.aes_cmac.pn;
igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
@@ -2449,6 +2706,19 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
remove_key ? "removing" : "installing", remove_key ? "removing" : "installing",
igtk_cmd.sta_id); igtk_cmd.sta_id);
if (!iwl_mvm_has_new_rx_api(mvm)) {
struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
.ctrl_flags = igtk_cmd.ctrl_flags,
.key_id = igtk_cmd.key_id,
.sta_id = igtk_cmd.sta_id,
.receive_seq_cnt = igtk_cmd.receive_seq_cnt
};
memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
ARRAY_SIZE(igtk_cmd_v1.igtk));
return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
sizeof(igtk_cmd_v1), &igtk_cmd_v1);
}
return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
sizeof(igtk_cmd), &igtk_cmd); sizeof(igtk_cmd), &igtk_cmd);
} }
@@ -2573,7 +2843,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
} }
sta_id = mvm_sta->sta_id; sta_id = mvm_sta->sta_id;
if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
goto end; goto end;
} }
@@ -2659,7 +2931,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id); keyconf->keyidx, sta_id);
if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
......
@@ -554,4 +554,8 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
int ac, int ssn, unsigned int wdg_timeout,
bool force);
#endif /* __sta_h__ */ #endif /* __sta_h__ */
@@ -838,6 +838,22 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
} }
} }
/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
unsigned long now = jiffies;
int tid;
for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
IWL_MVM_DQA_QUEUE_TIMEOUT, now))
return true;
}
return false;
}
/* /*
* Sets the fields in the Tx cmd that are crypto related * Sets the fields in the Tx cmd that are crypto related
*/ */
@@ -940,7 +956,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock); spin_unlock(&mvmsta->lock);
return 0; return 0;
} }
/* If we are here - TXQ exists and needs to be re-activated */ /* If we are here - TXQ exists and needs to be re-activated */
@@ -953,8 +968,25 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id); txq_id);
} }
/* Keep track of the time of the last frame for this RA/TID */ if (iwl_mvm_is_dqa_supported(mvm)) {
mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; /* Keep track of the time of the last frame for this RA/TID */
mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
/*
* If we have timed-out TIDs - schedule the worker that will
* reconfig the queues and update them
*
* Note that the mvm->queue_info_lock isn't being taken here in
* order to not serialize the TX flow. This isn't dangerous
* because scheduling mvm->add_stream_wk can't ruin the state,
* and if we DON'T schedule it due to some race condition then the
* next time we get here on TX we will.
*/
if (unlikely(mvm->queue_info[txq_id].status ==
IWL_MVM_QUEUE_SHARED &&
iwl_mvm_txq_should_update(mvm, txq_id)))
schedule_work(&mvm->add_stream_wk);
}
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
......
@@ -610,7 +610,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
{ {
struct iwl_scd_txq_cfg_cmd cmd = { struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue, .scd_queue = queue,
.enable = 1, .action = SCD_CFG_ENABLE_QUEUE,
.window = frame_limit, .window = frame_limit,
.sta_id = sta_id, .sta_id = sta_id,
.ssn = cpu_to_le16(ssn), .ssn = cpu_to_le16(ssn),
@@ -669,6 +669,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
tid_to_mac80211_ac[cfg->tid]; tid_to_mac80211_ac[cfg->tid];
else else
mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
mvm->queue_info[queue].txq_tid = cfg->tid;
} }
IWL_DEBUG_TX_QUEUES(mvm, IWL_DEBUG_TX_QUEUES(mvm,
@@ -682,7 +684,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
if (enable_queue) { if (enable_queue) {
struct iwl_scd_txq_cfg_cmd cmd = { struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue, .scd_queue = queue,
.enable = 1, .action = SCD_CFG_ENABLE_QUEUE,
.window = cfg->frame_limit, .window = cfg->frame_limit,
.sta_id = cfg->sta_id, .sta_id = cfg->sta_id,
.ssn = cpu_to_le16(ssn), .ssn = cpu_to_le16(ssn),
@@ -709,7 +711,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
{ {
struct iwl_scd_txq_cfg_cmd cmd = { struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue, .scd_queue = queue,
.enable = 0, .action = SCD_CFG_DISABLE_QUEUE,
}; };
bool remove_mac_queue = true; bool remove_mac_queue = true;
int ret; int ret;
@@ -744,8 +746,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
~BIT(mac80211_queue); ~BIT(mac80211_queue);
mvm->queue_info[queue].hw_queue_refcount--; mvm->queue_info[queue].hw_queue_refcount--;
cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0; cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
if (!cmd.enable) SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
if (cmd.action == SCD_CFG_DISABLE_QUEUE)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
IWL_DEBUG_TX_QUEUES(mvm, IWL_DEBUG_TX_QUEUES(mvm,
@@ -755,12 +758,13 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].hw_queue_to_mac80211); mvm->queue_info[queue].hw_queue_to_mac80211);
/* If the queue is still enabled - nothing left to do in this func */ /* If the queue is still enabled - nothing left to do in this func */
if (cmd.enable) { if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
spin_unlock_bh(&mvm->queue_info_lock); spin_unlock_bh(&mvm->queue_info_lock);
return; return;
} }
cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tid = mvm->queue_info[queue].txq_tid;
/* Make sure queue info is correct even though we overwrite it */ /* Make sure queue info is correct even though we overwrite it */
WARN(mvm->queue_info[queue].hw_queue_refcount || WARN(mvm->queue_info[queue].hw_queue_refcount ||
@@ -1131,7 +1135,13 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
} }
/* TODO: if queue was shared - need to re-enable AGGs */ /* If the queue is marked as shared - "unshare" it */
if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
queue);
}
} }
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm) void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
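The last hunk in this file replaces the old TODO with concrete handling: after inactive TIDs are removed, a queue that is still marked shared but is down to a single user gets flagged for reconfiguration ("unsharing"). A rough, self-contained sketch of that state transition, using stand-in types and enum values rather than the driver's own:

#include <stdio.h>

enum queue_status {
        QUEUE_FREE,
        QUEUE_READY,
        QUEUE_SHARED,
        QUEUE_RECONFIGURING,
};

struct queue_info {
        int hw_queue_refcount;
        enum queue_status status;
};

/* If a shared queue is left with a single user, mark it for reconfig. */
static void maybe_unshare(struct queue_info *q, int queue)
{
        if (q->hw_queue_refcount == 1 && q->status == QUEUE_SHARED) {
                q->status = QUEUE_RECONFIGURING;
                printf("Marking Q:%d for reconfig\n", queue);
        }
}

int main(void)
{
        struct queue_info q = { .hw_queue_refcount = 1, .status = QUEUE_SHARED };

        maybe_unshare(&q, 10);
        return 0;
}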
......
...@@ -502,20 +502,27 @@ static const struct pci_device_id iwl_hw_card_ids[] = { ...@@ -502,20 +502,27 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
/* 9000 Series */ /* 9000 Series */
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
/* a000 Series */ /* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
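The table above maps (PCI device ID, subsystem ID) pairs to a config struct; the hunk reassigns the 0x9DF0 entries from the iwl9260/iwl5165 configs to iwl9460_2ac_cfg and adds new 9160/9270/9460 IDs. As an illustration of the lookup idea only (the real table uses struct pci_device_id and the IWL_PCI_DEVICE macro), a tiny stand-alone sketch:

#include <stdint.h>
#include <stdio.h>

struct dev_entry {
        uint16_t device;
        uint16_t subdevice;
        const char *cfg_name;   /* stands in for the struct iwl_cfg pointer */
};

static const struct dev_entry dev_table[] = {
        { 0x271B, 0x0010, "iwl9160_2ac_cfg" },
        { 0x2526, 0x0000, "iwl9260_2ac_cfg" },
        { 0x2526, 0x1410, "iwl9270_2ac_cfg" },
        { 0x9DF0, 0x0A10, "iwl9460_2ac_cfg" },
};

static const char *lookup(uint16_t device, uint16_t subdevice)
{
        for (size_t i = 0; i < sizeof(dev_table) / sizeof(dev_table[0]); i++)
                if (dev_table[i].device == device &&
                    dev_table[i].subdevice == subdevice)
                        return dev_table[i].cfg_name;
        return NULL;
}

int main(void)
{
        printf("0x2526/0x1410 -> %s\n", lookup(0x2526, 0x1410));
        return 0;
}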
...@@ -608,7 +615,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -608,7 +615,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{ {
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
struct iwl_trans *iwl_trans; struct iwl_trans *iwl_trans;
int ret; int ret;
...@@ -637,11 +643,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -637,11 +643,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} }
if (iwl_trans->cfg->rf_id) { if (iwl_trans->cfg->rf_id) {
if (cfg == &iwl9260_2ac_cfg) if (cfg == &iwl9460_2ac_cfg &&
cfg_9260lc = &iwl9260lc_2ac_cfg; iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) { cfg = &iwl9000lc_2ac_cfg;
cfg = cfg_9260lc; iwl_trans->cfg = cfg;
iwl_trans->cfg = cfg_9260lc;
} }
} }
#endif #endif
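The probe hunk above drops the temporary cfg_9260lc indirection: when the PCI entry selected iwl9460_2ac_cfg and the RF ID register reads CSR_HW_RF_ID_TYPE_LC, the config is swapped for iwl9000lc_2ac_cfg. A minimal sketch of that override with stand-in types; only the identifier names are taken from the diff, and the register value below is an assumed placeholder.

#include <stdio.h>
#include <stdbool.h>

struct cfg { const char *name; };

static const struct cfg iwl9460_2ac_cfg   = { "9460 config" };
static const struct cfg iwl9000lc_2ac_cfg = { "9000 LC config" };

#define CSR_HW_RF_ID_TYPE_LC 0x00A0     /* assumed placeholder value for the sketch */

struct trans {
        bool rf_id;                     /* config says the RF ID register is valid */
        unsigned int hw_rf_id;          /* read from hardware during probe */
        const struct cfg *cfg;
};

static void maybe_override_cfg(struct trans *t, const struct cfg **cfg)
{
        if (t->rf_id && *cfg == &iwl9460_2ac_cfg &&
            t->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
                *cfg = &iwl9000lc_2ac_cfg;
                t->cfg = *cfg;
        }
}

int main(void)
{
        struct trans t = { .rf_id = true, .hw_rf_id = CSR_HW_RF_ID_TYPE_LC,
                           .cfg = &iwl9460_2ac_cfg };
        const struct cfg *cfg = &iwl9460_2ac_cfg;

        maybe_override_cfg(&t, &cfg);
        printf("selected: %s\n", cfg->name);
        return 0;
}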
......
...@@ -827,10 +827,16 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, ...@@ -827,10 +827,16 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
if (ret) if (ret)
return ret; return ret;
/* Notify the ucode of the loaded section number and status */ /* Notify ucode of loaded section number and status */
val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); if (trans->cfg->use_tfh) {
val = val | (sec_num << shift_param); val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); val = val | (sec_num << shift_param);
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
} else {
val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
val = val | (sec_num << shift_param);
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
}
sec_num = (sec_num << 1) | 0x1; sec_num = (sec_num << 1) | 0x1;
} }
...@@ -838,10 +844,21 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, ...@@ -838,10 +844,21 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
iwl_enable_interrupts(trans); iwl_enable_interrupts(trans);
if (cpu == 1) if (trans->cfg->use_tfh) {
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF); if (cpu == 1)
else iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); 0xFFFF);
else
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
0xFFFFFFFF);
} else {
if (cpu == 1)
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
0xFFFF);
else
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
0xFFFFFFFF);
}
return 0; return 0;
} }
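Both hunks in iwl_pcie_load_cpu_sections_8000 introduce the same split: on use_tfh hardware the ucode load status is reported through the UREG_UCODE_LOAD_STATUS periphery register, otherwise through the legacy FH_UCODE_LOAD_STATUS register. A compact sketch of the idea with stubbed register accessors; the stubs are assumptions standing in for the real iwl_{read,write}_prph / iwl_{read,write}_direct32 helpers.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Stub register accessors; real code uses prph or direct32 I/O helpers. */
static uint32_t reg_read(const char *reg) { (void)reg; return 0; }
static void reg_write(const char *reg, uint32_t v)
{
        printf("%s <- 0x%08x\n", reg, (unsigned int)v);
}

/* Report that section "sec_num" finished loading, via the right register. */
static void notify_section_loaded(bool use_tfh, int shift_param, uint32_t sec_num)
{
        const char *reg = use_tfh ? "UREG_UCODE_LOAD_STATUS"
                                  : "FH_UCODE_LOAD_STATUS";
        uint32_t val = reg_read(reg) | (sec_num << shift_param);

        reg_write(reg, val);
}

/* Signal "all sections loaded" for the given CPU. */
static void notify_cpu_done(bool use_tfh, int cpu)
{
        const char *reg = use_tfh ? "UREG_UCODE_LOAD_STATUS"
                                  : "FH_UCODE_LOAD_STATUS";

        reg_write(reg, cpu == 1 ? 0xFFFF : 0xFFFFFFFF);
}

int main(void)
{
        notify_section_loaded(true, 0, 1);
        notify_cpu_done(true, 1);
        return 0;
}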
...@@ -886,14 +903,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, ...@@ -886,14 +903,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return ret; return ret;
} }
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
iwl_set_bits_prph(trans,
CSR_UCODE_LOAD_STATUS_ADDR,
(LMPM_CPU_UCODE_LOADING_COMPLETED |
LMPM_CPU_HDRS_LOADING_COMPLETED |
LMPM_CPU_UCODE_LOADING_STARTED) <<
shift_param);
*first_ucode_section = last_read_idx; *first_ucode_section = last_read_idx;
return 0; return 0;
...@@ -1960,6 +1969,10 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) ...@@ -1960,6 +1969,10 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr); txq->q.read_ptr, txq->q.write_ptr);
if (trans->cfg->use_tfh)
/* TODO: access new SCD registers and dump them */
return;
scd_sram_addr = trans_pcie->scd_base_addr + scd_sram_addr = trans_pcie->scd_base_addr +
SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
......
...@@ -703,6 +703,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) ...@@ -703,6 +703,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
if (trans->cfg->use_tfh)
return;
trans_pcie->scd_base_addr = trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
...@@ -970,11 +973,13 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) ...@@ -970,11 +973,13 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
} }
} }
if (trans->cfg->use_tfh) if (trans->cfg->use_tfh) {
iwl_write_direct32(trans, TFH_TRANSFER_MODE, iwl_write_direct32(trans, TFH_TRANSFER_MODE,
TFH_TRANSFER_MAX_PENDING_REQ | TFH_TRANSFER_MAX_PENDING_REQ |
TFH_CHUNK_SIZE_128 | TFH_CHUNK_SIZE_128 |
TFH_CHUNK_SPLIT_MODE); TFH_CHUNK_SPLIT_MODE);
return 0;
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
if (trans->cfg->base_params->num_of_queues > 20) if (trans->cfg->base_params->num_of_queues > 20)
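In iwl_pcie_tx_init the TFH path now configures the transfer mode (max pending requests, 128-byte chunks, chunk-split mode) and returns early, skipping the scheduler (SCD) setup that only applies to pre-TFH hardware. A small sketch of that early-out structure; the bit values below are invented for the example, only the flag names come from the diff.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Illustrative bit values; the real ones are defined in the TFH register header. */
#define TFH_TRANSFER_MAX_PENDING_REQ  (0x3u << 0)
#define TFH_CHUNK_SIZE_128            (1u << 8)
#define TFH_CHUNK_SPLIT_MODE          (1u << 10)

static void write_transfer_mode(uint32_t val)
{
        printf("TFH_TRANSFER_MODE <- 0x%08x\n", (unsigned int)val);
}

static void setup_scheduler(void)
{
        printf("configuring legacy SCD queues\n");
}

static int tx_init(bool use_tfh)
{
        if (use_tfh) {
                write_transfer_mode(TFH_TRANSFER_MAX_PENDING_REQ |
                                    TFH_CHUNK_SIZE_128 |
                                    TFH_CHUNK_SPLIT_MODE);
                return 0;       /* nothing SCD-related to do on TFH hardware */
        }

        setup_scheduler();
        return 0;
}

int main(void)
{
        tx_init(true);
        tx_init(false);
        return 0;
}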
...@@ -1249,6 +1254,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, ...@@ -1249,6 +1254,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
if (test_and_set_bit(txq_id, trans_pcie->queue_used)) if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id); WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
if (cfg && trans->cfg->use_tfh)
WARN_ONCE(1, "Expected no calls to SCD configuration");
txq->wd_timeout = msecs_to_jiffies(wdg_timeout); txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
if (cfg) { if (cfg) {
...@@ -1366,6 +1374,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, ...@@ -1366,6 +1374,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
return; return;
} }
if (configure_scd && trans->cfg->use_tfh)
WARN_ONCE(1, "Expected no calls to SCD configuration");
if (configure_scd) { if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id); iwl_scd_txq_set_inactive(trans, txq_id);
......