Commit c2da9133 authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2016-08-30-2' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

* Preparation for new HW continues;
* Some DQA improvements;
* Support for GMAC;
parents 60747ef4 76f8c0e1
...
@@ -72,15 +72,15 @@
 #define IWL9000_SMEM_OFFSET		0x400000
 #define IWL9000_SMEM_LEN		0x68000
-#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
+#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
 #define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
-#define IWL9260LC_FW_PRE "iwlwifi-9260-th-a0-lc-a0-"
+#define IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
 #define IWL9000_MODULE_FIRMWARE(api) \
 	IWL9000_FW_PRE "-" __stringify(api) ".ucode"
 #define IWL9260_MODULE_FIRMWARE(api) \
 	IWL9260_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9260LC_MODULE_FIRMWARE(api) \
-	IWL9260LC_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9000LC_MODULE_FIRMWARE(api) \
+	IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_9000		10
...
@@ -146,6 +146,16 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 	.mac_addr_from_csr = true,			\
 	.rf_id = true
 
+const struct iwl_cfg iwl9160_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9160",
+	.fw_name_pre = IWL9260_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
 const struct iwl_cfg iwl9260_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 9260",
 	.fw_name_pre = IWL9260_FW_PRE,
...
@@ -156,13 +166,9 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
 	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
 };
 
-/*
- * TODO: the struct below is for internal testing only; it should be
- * removed by EO 2016~
- */
-const struct iwl_cfg iwl9260lc_2ac_cfg = {
-	.name = "Intel(R) Dual Band Wireless AC 9260",
-	.fw_name_pre = IWL9260LC_FW_PRE,
+const struct iwl_cfg iwl9270_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9270",
+	.fw_name_pre = IWL9260_FW_PRE,
 	IWL_DEVICE_9000,
 	.ht_params = &iwl9000_ht_params,
 	.nvm_ver = IWL9000_NVM_VERSION,
...
@@ -170,8 +176,8 @@ const struct iwl_cfg iwl9260lc_2ac_cfg = {
 	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
 };
 
-const struct iwl_cfg iwl5165_2ac_cfg = {
-	.name = "Intel(R) Dual Band Wireless AC 5165",
+const struct iwl_cfg iwl9460_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9460",
 	.fw_name_pre = IWL9000_FW_PRE,
 	IWL_DEVICE_9000,
 	.ht_params = &iwl9000_ht_params,
...
@@ -181,6 +187,21 @@ const struct iwl_cfg iwl5165_2ac_cfg = {
 	.integrated = true,
 };
 
+/*
+ * TODO: the struct below is for internal testing only; it should be
+ * removed by EO 2016~
+ */
+const struct iwl_cfg iwl9000lc_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9000",
+	.fw_name_pre = IWL9000LC_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
+};
+
 MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
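For readers unfamiliar with the MODULE_FIRMWARE plumbing, here is a minimal standalone sketch of how these macros build the firmware filename the driver requests from userspace. It is an illustration only; the API level 30 is a made-up stand-in for IWL9000_UCODE_API_MAX, whose value is not shown in this diff.

```c
#include <stdio.h>

/* Two-level expansion, matching the kernel's __stringify() */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
#define IWL9000LC_MODULE_FIRMWARE(api) \
	IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"

int main(void)
{
	/* String-literal concatenation yields the requested filename. */
	puts(IWL9000LC_MODULE_FIRMWARE(30));
	return 0;
}
```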
...
@@ -449,9 +449,11 @@ extern const struct iwl_cfg iwl4165_2ac_cfg;
 extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
+extern const struct iwl_cfg iwl9000lc_2ac_cfg;
+extern const struct iwl_cfg iwl9160_2ac_cfg;
 extern const struct iwl_cfg iwl9260_2ac_cfg;
-extern const struct iwl_cfg iwl9260lc_2ac_cfg;
-extern const struct iwl_cfg iwl5165_2ac_cfg;
+extern const struct iwl_cfg iwl9270_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg;
 extern const struct iwl_cfg iwla000_2ac_cfg;
 #endif /* CONFIG_IWLMVM */
...
...
@@ -77,7 +77,6 @@
  */
 #define FH_MEM_LOWER_BOUND		(0x1000)
 #define FH_MEM_UPPER_BOUND		(0x2000)
-#define TFH_MEM_LOWER_BOUND		(0xA06000)
 
 /**
  * Keep-Warm (KW) buffer base address.
...
@@ -120,7 +119,7 @@
 #define FH_MEM_CBBC_20_31_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0xB20)
 #define FH_MEM_CBBC_20_31_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0xB80)
 /* a000 TFD table address, 64 bit */
-#define TFH_TFDQ_CBB_TABLE		(TFH_MEM_LOWER_BOUND + 0x1C00)
+#define TFH_TFDQ_CBB_TABLE		(0x1C00)
 
 /* Find TFD CB base pointer for given queue */
 static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
...
@@ -156,7 +155,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
  * In case of DRAM read address which is not aligned to 128B, the TFH will
  * enable transfer size which doesn't cross 64B DRAM address boundary.
  */
-#define TFH_TRANSFER_MODE		(TFH_MEM_LOWER_BOUND + 0x1F40)
+#define TFH_TRANSFER_MODE		(0x1F40)
 #define TFH_TRANSFER_MAX_PENDING_REQ	0xc
 #define TFH_CHUNK_SIZE_128		BIT(8)
 #define TFH_CHUNK_SPLIT_MODE		BIT(10)
...
@@ -167,7 +166,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
  * the start of the TFD first TB.
  * In case of a DRAM Tx CMD update the TFH will update PN and Key ID
  */
-#define TFH_TXCMD_UPDATE_CFG		(TFH_MEM_LOWER_BOUND + 0x1F48)
+#define TFH_TXCMD_UPDATE_CFG		(0x1F48)
 /*
  * Controls TX DMA operation
  *
...
@@ -181,22 +180,22 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
  * set to 1 - interrupt is sent to the driver
  * Bit 0: Indicates the snoop configuration
  */
-#define TFH_SRV_DMA_CHNL0_CTRL		(TFH_MEM_LOWER_BOUND + 0x1F60)
+#define TFH_SRV_DMA_CHNL0_CTRL		(0x1F60)
 #define TFH_SRV_DMA_SNOOP		BIT(0)
 #define TFH_SRV_DMA_TO_DRIVER		BIT(24)
 #define TFH_SRV_DMA_START		BIT(31)
 
 /* Defines the DMA SRAM write start address to transfer a data block */
-#define TFH_SRV_DMA_CHNL0_SRAM_ADDR	(TFH_MEM_LOWER_BOUND + 0x1F64)
+#define TFH_SRV_DMA_CHNL0_SRAM_ADDR	(0x1F64)
 
 /* Defines the 64bits DRAM start address to read the DMA data block from */
-#define TFH_SRV_DMA_CHNL0_DRAM_ADDR	(TFH_MEM_LOWER_BOUND + 0x1F68)
+#define TFH_SRV_DMA_CHNL0_DRAM_ADDR	(0x1F68)
 
 /*
  * Defines the number of bytes to transfer from DRAM to SRAM.
  * Note that this register may be configured with non-dword aligned size.
  */
-#define TFH_SRV_DMA_CHNL0_BC		(TFH_MEM_LOWER_BOUND + 0x1F70)
+#define TFH_SRV_DMA_CHNL0_BC		(0x1F70)
 
 /**
  * Rx SRAM Control and Status Registers (RSCSR)
...
...
@@ -302,22 +302,17 @@
 #define OSC_CLK_FORCE_CONTROL		(0x8)
 
 #define FH_UCODE_LOAD_STATUS		(0x1AF0)
-#define CSR_UCODE_LOAD_STATUS_ADDR	(0x1E70)
-enum secure_load_status_reg {
-	LMPM_CPU_UCODE_LOADING_STARTED			= 0x00000001,
-	LMPM_CPU_HDRS_LOADING_COMPLETED			= 0x00000003,
-	LMPM_CPU_UCODE_LOADING_COMPLETED		= 0x00000007,
-	LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED		= 0x000000F8,
-	LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK	= 0x0000FF00,
-};
 
-#define LMPM_SECURE_INSPECTOR_CODE_ADDR	(0x1E38)
-#define LMPM_SECURE_INSPECTOR_DATA_ADDR	(0x1E3C)
+/*
+ * Replacing FH_UCODE_LOAD_STATUS
+ * This register is written by the driver and is read by uCode during
+ * boot flow.
+ * Note this address is cleared after MAC reset.
+ */
+#define UREG_UCODE_LOAD_STATUS		(0xa05c40)
+
 #define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR	(0x1E78)
 #define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR	(0x1E7C)
-#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE	(0x400000)
-#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE	(0x402000)
 #define LMPM_SECURE_CPU1_HDR_MEM_SPACE		(0x420000)
 #define LMPM_SECURE_CPU2_HDR_MEM_SPACE		(0x420400)
...
...
@@ -432,26 +432,43 @@ struct iwl_mvm_rm_sta_cmd {
 	u8 reserved[3];
 } __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
 
+/**
+ * struct iwl_mvm_mgmt_mcast_key_cmd_v1
+ * ( MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: %iwl_sta_key_flag
+ * @igtk:
+ * @k1: unused
+ * @k2: unused
+ * @sta_id: station ID that supports IGTK
+ * @key_id:
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwl_mvm_mgmt_mcast_key_cmd_v1 {
+	__le32 ctrl_flags;
+	u8 igtk[16];
+	u8 k1[16];
+	u8 k2[16];
+	__le32 key_id;
+	__le32 sta_id;
+	__le64 receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
 /**
  * struct iwl_mvm_mgmt_mcast_key_cmd
  * ( MGMT_MCAST_KEY = 0x1f )
  * @ctrl_flags: %iwl_sta_key_flag
- * @IGTK:
- * @K1: unused
- * @K2: unused
+ * @igtk: IGTK master key
  * @sta_id: station ID that supports IGTK
  * @key_id:
  * @receive_seq_cnt: initial RSC/PN needed for replay check
  */
 struct iwl_mvm_mgmt_mcast_key_cmd {
 	__le32 ctrl_flags;
-	u8 IGTK[16];
-	u8 K1[16];
-	u8 K2[16];
+	u8 igtk[32];
 	__le32 key_id;
 	__le32 sta_id;
 	__le64 receive_seq_cnt;
-} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */
 
 struct iwl_mvm_wep_key {
 	u8 key_index;
...
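The widened igtk[32] field is what makes the GMAC support elsewhere in this merge possible: BIP-GMAC-256 keys are 32 bytes, while CMAC and GMAC-128 keys fill only the first 16. A minimal sketch of how a caller could populate the v2 command follows; it is an illustration only, the helper name and its arguments are invented, and it assumes the command struct was zero-initialized and that flags/RSC setup happen elsewhere.

```c
/* Illustration only - not part of this patch. */
static void example_fill_mcast_key(struct iwl_mvm_mgmt_mcast_key_cmd *cmd,
				   struct ieee80211_key_conf *keyconf,
				   u8 sta_id)
{
	/* Copy 16 bytes for (G)CMAC-128, 32 bytes for GMAC-256 */
	memcpy(cmd->igtk, keyconf->key,
	       min_t(size_t, keyconf->keylen, sizeof(cmd->igtk)));
	cmd->key_id = cpu_to_le32(keyconf->keyidx);
	cmd->sta_id = cpu_to_le32(sta_id);
}
```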
...
@@ -675,13 +675,21 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
 			    tx_resp->frame_count) & 0xfff;
 }
 
+/* Available options for the SCD_QUEUE_CFG HCMD */
+enum iwl_scd_cfg_actions {
+	SCD_CFG_DISABLE_QUEUE		= 0x0,
+	SCD_CFG_ENABLE_QUEUE		= 0x1,
+	SCD_CFG_UPDATE_QUEUE_TID	= 0x2,
+};
+
 /**
  * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
  * @token:
  * @sta_id: station id
  * @tid:
  * @scd_queue: scheduler queue to configure
- * @enable: 1 queue enable, 0 queue disable
+ * @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner
+ *	Value is one of %iwl_scd_cfg_actions options
  * @aggregate: 1 aggregated queue, 0 otherwise
  * @tx_fifo: %enum iwl_mvm_tx_fifo
  * @window: BA window size
...
@@ -692,7 +700,7 @@ struct iwl_scd_txq_cfg_cmd {
 	u8 sta_id;
 	u8 tid;
 	u8 scd_queue;
-	u8 enable;
+	u8 action;
 	u8 aggregate;
 	u8 tx_fifo;
 	u8 window;
...
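The new SCD_CFG_UPDATE_QUEUE_TID action is what the DQA queue-sharing changes later in this merge rely on when a shared queue's TID "owner" moves. A minimal usage sketch, assuming the usual mvm command helpers; the wrapper function name is invented and locking/error paths are omitted:

```c
/* Illustration only - not part of this patch. */
static int example_change_queue_owner(struct iwl_mvm *mvm, int queue, int tid)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
		.tid = tid,
	};

	/* Same command ID as enable/disable; only the action differs */
	return iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
				    sizeof(cmd), &cmd);
}
```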
...
@@ -482,13 +482,17 @@ struct iwl_nvm_access_cmd {
  * @block_size: the block size in powers of 2
  * @block_num: number of blocks specified in the command.
  * @device_phy_addr: virtual addresses from device side
+ *	32 bit address for API version 1, 64 bit address for API version 2.
  */
 struct iwl_fw_paging_cmd {
 	__le32 flags;
 	__le32 block_size;
 	__le32 block_num;
-	__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
-} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+	union {
+		__le32 addr32[NUM_OF_FW_PAGING_BLOCKS];
+		__le64 addr64[NUM_OF_FW_PAGING_BLOCKS];
+	} device_phy_addr;
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_2 */
 
 /*
  * Fw items ID's
...
...
@@ -385,9 +385,7 @@ static int iwl_save_fw_paging(struct iwl_mvm *mvm,
 /* send paging cmd to FW in case CPU2 has paging image */
 static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
 {
-	int blk_idx;
-	__le32 dev_phy_addr;
-	struct iwl_fw_paging_cmd fw_paging_cmd = {
+	struct iwl_fw_paging_cmd paging_cmd = {
 		.flags =
 			cpu_to_le32(PAGING_CMD_IS_SECURED |
 				    PAGING_CMD_IS_ENABLED |
...
@@ -396,18 +394,32 @@ static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
 		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
 		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
 	};
+	int blk_idx, size = sizeof(paging_cmd);
+
+	/* A bit hard coded - but this is the old API and will be deprecated */
+	if (!iwl_mvm_has_new_tx_api(mvm))
+		size -= NUM_OF_FW_PAGING_BLOCKS * 4;
 
-	/* loop for all paging blocks + CSS block */
+	/* loop for all paging blocks + CSS block */
 	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
-		dev_phy_addr =
-			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
-				    PAGE_2_EXP_SIZE);
-		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+		dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
+
+		addr = addr >> PAGE_2_EXP_SIZE;
+
+		if (iwl_mvm_has_new_tx_api(mvm)) {
+			__le64 phy_addr = cpu_to_le64(addr);
+
+			paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
+		} else {
+			__le32 phy_addr = cpu_to_le32(addr);
+
+			paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
+		}
 	}
 
 	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
 						     IWL_ALWAYS_LONG_GROUP, 0),
 				    0, size, &paging_cmd);
 }
 
 /*
...
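Why the size arithmetic above works (not spelled out in the patch, but it follows from the struct layout): in the v2 command the union's addr32[] array occupies the first NUM_OF_FW_PAGING_BLOCKS * 4 bytes of the address area, so sending only sizeof(paging_cmd) - NUM_OF_FW_PAGING_BLOCKS * 4 bytes reproduces the old FW_PAGING_BLOCK_CMD_API_S_VER_1 layout exactly: flags, block_size, block_num, then the 32-bit addresses, with the packed struct guaranteeing no padding in between.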
...
@@ -465,7 +465,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
 	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 
-	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4);
+	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
 	memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
 	hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
 	hw->wiphy->cipher_suites = mvm->ciphers;
...
@@ -490,6 +490,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 		mvm->ciphers[hw->wiphy->n_cipher_suites] =
 			WLAN_CIPHER_SUITE_AES_CMAC;
 		hw->wiphy->n_cipher_suites++;
+		if (iwl_mvm_has_new_rx_api(mvm)) {
+			mvm->ciphers[hw->wiphy->n_cipher_suites] =
+				WLAN_CIPHER_SUITE_BIP_GMAC_128;
+			hw->wiphy->n_cipher_suites++;
+			mvm->ciphers[hw->wiphy->n_cipher_suites] =
+				WLAN_CIPHER_SUITE_BIP_GMAC_256;
+			hw->wiphy->n_cipher_suites++;
+		}
 	}
 
 	/* currently FW API supports only one optional cipher scheme */
...
@@ -2746,6 +2754,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 		key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
 		break;
 	case WLAN_CIPHER_SUITE_AES_CMAC:
+	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
 		WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
 		break;
 	case WLAN_CIPHER_SUITE_WEP40:
...
@@ -2779,9 +2789,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 		 * GTK on AP interface is a TX-only key, return 0;
 		 * on IBSS they're per-station and because we're lazy
 		 * we don't support them for RX, so do the same.
-		 * CMAC in AP/IBSS modes must be done in software.
+		 * CMAC/GMAC in AP/IBSS modes must be done in software.
 		 */
-		if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+		if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+		    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+		    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
 			ret = -EOPNOTSUPP;
 		else
 			ret = 0;
...
...
@@ -697,6 +697,10 @@ struct iwl_mvm_baid_data {
  *	it. In this state, when a new queue is needed to be allocated but no
  *	such free queue exists, an inactive queue might be freed and given to
  *	the new RA/TID.
+ * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
+ *	This is the state of a queue that has had traffic pass through it, but
+ *	needs to be reconfigured for some reason, e.g. the queue needs to
+ *	become unshared and aggregations re-enabled on it.
  */
 enum iwl_mvm_queue_status {
 	IWL_MVM_QUEUE_FREE,
...
@@ -704,10 +708,11 @@ enum iwl_mvm_queue_status {
 	IWL_MVM_QUEUE_READY,
 	IWL_MVM_QUEUE_SHARED,
 	IWL_MVM_QUEUE_INACTIVE,
+	IWL_MVM_QUEUE_RECONFIGURING,
 };
 
 #define IWL_MVM_DQA_QUEUE_TIMEOUT	(5 * HZ)
-#define IWL_MVM_NUM_CIPHERS		8
+#define IWL_MVM_NUM_CIPHERS		10
 
 struct iwl_mvm {
 	/* for logger access */
...
@@ -767,6 +772,7 @@ struct iwl_mvm {
 		u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
 		bool reserved; /* Is this the TXQ reserved for a STA */
 		u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
+		u8 txq_tid; /* The TID "owner" of this queue */
 		u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
 		/* Timestamp for inactivation per TID of this queue */
 		unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
...
@@ -1122,6 +1128,18 @@ static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
 		(mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3);
 }
 
+static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
+{
+	return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
+	       (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
+}
+
+static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
+{
+	return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
+	       (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
+}
+
 static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 {
 	bool nvm_lar = mvm->nvm_data->lar_enabled;
...
@@ -1192,6 +1210,12 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
 			   IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
 }
 
+static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
+{
+	/* TODO - replace with TLV once defined */
+	return mvm->trans->cfg->use_tfh;
+}
+
 static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
 {
 #ifdef CONFIG_THERMAL
...
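A small usage sketch of the new range helpers (illustration only; the wrapper name is invented), showing the kind of open-coded bounds check they are meant to replace in mvm callers:

```c
/* Illustration only - not part of this patch. */
static bool example_queue_is_dqa_managed(struct iwl_mvm *mvm, u8 queue)
{
	/* DQA-managed queues fall in either the data or the mgmt range */
	return iwl_mvm_is_dqa_data_queue(mvm, queue) ||
	       iwl_mvm_is_dqa_mgmt_queue(mvm, queue);
}
```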
...
@@ -132,6 +132,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
 		   IEEE80211_CCMP_PN_LEN) <= 0)
 		return -1;
 
+	if (!(stats->flag & RX_FLAG_AMSDU_MORE))
 		memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
 	stats->flag |= RX_FLAG_PN_VALIDATED;
...
@@ -883,6 +884,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 			u8 *qc = ieee80211_get_qos_ctl(hdr);
 
 			*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+			if (!(desc->amsdu_info &
+			      IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
+				rx_status->flag |= RX_FLAG_AMSDU_MORE;
 		}
 		if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
 			iwl_mvm_agg_rx_received(mvm, baid);
...
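The rationale for the two RX hunks above, as implied by the surrounding replay-check logic: every subframe of an A-MSDU carries the same CCMP PN, so if the stored PN were advanced on the first subframe, the remaining subframes would compare as "PN <= stored PN" and be dropped as replays. Marking all but the last subframe with RX_FLAG_AMSDU_MORE defers the PN update to the final subframe, keeping replay detection correct across the whole A-MSDU.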
...
@@ -554,4 +554,8 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
 
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+			       int ac, int ssn, unsigned int wdg_timeout,
+			       bool force);
+
 #endif /* __sta_h__ */
...
@@ -838,6 +838,22 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
 	}
 }
 
+/* Check if there are any timed-out TIDs on a given shared TXQ */
+static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
+{
+	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
+	unsigned long now = jiffies;
+	int tid;
+
+	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
+				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+			return true;
+	}
+
+	return false;
+}
+
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
...
@@ -940,7 +956,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 			iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 			spin_unlock(&mvmsta->lock);
 			return 0;
-
 		}
 
 		/* If we are here - TXQ exists and needs to be re-activated */
...
@@ -953,9 +968,26 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 				    txq_id);
 	}
 
-	/* Keep track of the time of the last frame for this RA/TID */
-	mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		/* Keep track of the time of the last frame for this RA/TID */
+		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+
+		/*
+		 * If we have timed-out TIDs - schedule the worker that will
+		 * reconfig the queues and update them
+		 *
+		 * Note that the mvm->queue_info_lock isn't being taken here in
+		 * order to not serialize the TX flow. This isn't dangerous
+		 * because scheduling mvm->add_stream_wk can't ruin the state,
+		 * and if we DON'T schedule it due to some race condition then
+		 * on the next TX we get here we will.
+		 */
+		if (unlikely(mvm->queue_info[txq_id].status ==
+						IWL_MVM_QUEUE_SHARED &&
+			     iwl_mvm_txq_should_update(mvm, txq_id)))
+			schedule_work(&mvm->add_stream_wk);
+	}
 
 	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
 		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
...
...
@@ -610,7 +610,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
-		.enable = 1,
+		.action = SCD_CFG_ENABLE_QUEUE,
 		.window = frame_limit,
 		.sta_id = sta_id,
 		.ssn = cpu_to_le16(ssn),
...
@@ -669,6 +669,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 				tid_to_mac80211_ac[cfg->tid];
 		else
 			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
+
+		mvm->queue_info[queue].txq_tid = cfg->tid;
 	}
 
 	IWL_DEBUG_TX_QUEUES(mvm,
...
@@ -682,7 +684,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 	if (enable_queue) {
 		struct iwl_scd_txq_cfg_cmd cmd = {
 			.scd_queue = queue,
-			.enable = 1,
+			.action = SCD_CFG_ENABLE_QUEUE,
 			.window = cfg->frame_limit,
 			.sta_id = cfg->sta_id,
 			.ssn = cpu_to_le16(ssn),
...
@@ -709,7 +711,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
-		.enable = 0,
+		.action = SCD_CFG_DISABLE_QUEUE,
 	};
 	bool remove_mac_queue = true;
 	int ret;
...
@@ -744,8 +746,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 			~BIT(mac80211_queue);
 	mvm->queue_info[queue].hw_queue_refcount--;
 
-	cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
-	if (!cmd.enable)
+	cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
+		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
 		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
 
 	IWL_DEBUG_TX_QUEUES(mvm,
...
@@ -755,12 +758,13 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 			    mvm->queue_info[queue].hw_queue_to_mac80211);
 
 	/* If the queue is still enabled - nothing left to do in this func */
-	if (cmd.enable) {
+	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
 		spin_unlock_bh(&mvm->queue_info_lock);
 		return;
 	}
 
 	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+	cmd.tid = mvm->queue_info[queue].txq_tid;
 
 	/* Make sure queue info is correct even though we overwrite it */
 	WARN(mvm->queue_info[queue].hw_queue_refcount ||
...
@@ -1131,7 +1135,13 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
 			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
 	}
 
-	/* TODO: if queue was shared - need to re-enable AGGs */
+	/* If the queue is marked as shared - "unshare" it */
+	if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
+	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
+		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
+				    queue);
+	}
 }
 
 void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
...
...
@@ -502,20 +502,27 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
+	{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
 
 /* a000 Series */
 	{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
...
@@ -608,7 +615,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
 	const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
-	const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
 	struct iwl_trans *iwl_trans;
 	int ret;
...
@@ -637,11 +643,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	if (iwl_trans->cfg->rf_id) {
-		if (cfg == &iwl9260_2ac_cfg)
-			cfg_9260lc = &iwl9260lc_2ac_cfg;
-		if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
-			cfg = cfg_9260lc;
-			iwl_trans->cfg = cfg_9260lc;
+		if (cfg == &iwl9460_2ac_cfg &&
+		    iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
+			cfg = &iwl9000lc_2ac_cfg;
+			iwl_trans->cfg = cfg;
 		}
 	}
 #endif
...
...
@@ -827,10 +827,16 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
 		if (ret)
 			return ret;
 
-		/* Notify the ucode of the loaded section number and status */
-		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
-		val = val | (sec_num << shift_param);
-		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+		/* Notify ucode of loaded section number and status */
+		if (trans->cfg->use_tfh) {
+			val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
+			val = val | (sec_num << shift_param);
+			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
+		} else {
+			val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+			val = val | (sec_num << shift_param);
+			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+		}
+
 		sec_num = (sec_num << 1) | 0x1;
 	}
...
@@ -838,10 +844,21 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
 		iwl_enable_interrupts(trans);
 
-		if (cpu == 1)
-			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
-		else
-			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
+		if (trans->cfg->use_tfh) {
+			if (cpu == 1)
+				iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+					       0xFFFF);
+			else
+				iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+					       0xFFFFFFFF);
+		} else {
+			if (cpu == 1)
+				iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+						   0xFFFF);
+			else
+				iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+						   0xFFFFFFFF);
+		}
 
 	return 0;
 }
...
@@ -886,14 +903,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
 			return ret;
 	}
 
-	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-		iwl_set_bits_prph(trans,
-				  CSR_UCODE_LOAD_STATUS_ADDR,
-				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
-				   LMPM_CPU_HDRS_LOADING_COMPLETED |
-				   LMPM_CPU_UCODE_LOADING_STARTED) <<
-					shift_param);
-
 	*first_ucode_section = last_read_idx;
 
 	return 0;
...
@@ -1960,6 +1969,10 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
 	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
 		txq->q.read_ptr, txq->q.write_ptr);
 
+	if (trans->cfg->use_tfh)
+		/* TODO: access new SCD registers and dump them */
+		return;
+
 	scd_sram_addr = trans_pcie->scd_base_addr +
 			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
 	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
...
...
@@ -703,6 +703,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
 	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
 
+	if (trans->cfg->use_tfh)
+		return;
+
 	trans_pcie->scd_base_addr =
 		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
...
@@ -970,11 +973,13 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 		}
 	}
 
-	if (trans->cfg->use_tfh)
+	if (trans->cfg->use_tfh) {
 		iwl_write_direct32(trans, TFH_TRANSFER_MODE,
 				   TFH_TRANSFER_MAX_PENDING_REQ |
 				   TFH_CHUNK_SIZE_128 |
 				   TFH_CHUNK_SPLIT_MODE);
+		return 0;
+	}
 
 	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
 	if (trans->cfg->base_params->num_of_queues > 20)
...
@@ -1249,6 +1254,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
 		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
 
+	if (cfg && trans->cfg->use_tfh)
+		WARN_ONCE(1, "Expected no calls to SCD configuration");
+
 	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
 
 	if (cfg) {
...
@@ -1366,6 +1374,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
 		return;
 	}
 
+	if (configure_scd && trans->cfg->use_tfh)
+		WARN_ONCE(1, "Expected no calls to SCD configuration");
+
 	if (configure_scd) {
 		iwl_scd_txq_set_inactive(trans, txq_id);
...