Commit b13b3cdf authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2017-11-03' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

iwlwifi updates:

* Some new PCI IDs;
* A bunch of cleanups;
* The timer API updates by Kees (see the conversion sketch below);
* Add more register dump call-sites;
* A fix for a locking issue in the TX flush code;
* Actual implementation of the TX flush code for A000;
* An optimization to drop RX frames during restart to avoid BA issues.
parents 2798b80b 57b36f7f
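The timer API change called out above recurs throughout the diff below (iwl_mvm_reorder_timer_expired, iwl_mvm_rx_agg_session_expired): timer callbacks move from taking a cast unsigned long to taking the struct timer_list itself. A minimal sketch of the conversion pattern, with a hypothetical struct foo standing in for the driver's real structures:

/* Hypothetical example of the timer conversion; "struct foo" is a
 * stand-in, not an iwlwifi structure.
 */
#include <linux/timer.h>

struct foo {
	struct timer_list timer;
	int pending;
};

/* Old style: the callback received an opaque unsigned long that was
 * cast back to the object by hand.
 */
static void foo_expired_old(unsigned long data)
{
	struct foo *f = (struct foo *)data;

	f->pending = 0;
}

/* New style: the callback receives the timer itself and recovers the
 * enclosing object with from_timer(), a container_of() wrapper.
 */
static void foo_expired(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, timer);

	f->pending = 0;
}

static void foo_init(struct foo *f)
{
	/* was: setup_timer(&f->timer, foo_expired_old, (unsigned long)f); */
	timer_setup(&f->timer, foo_expired, 0);
}

This is also why struct iwl_mvm_baid_data gains an rcu_ptr member in this series: the expiry callback can no longer be handed an arbitrary pointer cast to unsigned long, so everything it needs must be reachable from the object that embeds the timer.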
@@ -100,14 +100,6 @@
#define NVM_HW_SECTION_NUM_FAMILY_8000 10
#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
#define MAX_RX_AGG_SIZE_8260_SDIO 21
#define MAX_TX_AGG_SIZE_8260_SDIO 40
/* Max A-MPDU exponent for HT and VHT */
#define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K
#define MAX_VHT_AMPDU_EXPONENT_8260_SDIO IEEE80211_VHT_MAX_AMPDU_32K
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = 31,
@@ -234,48 +226,5 @@ const struct iwl_cfg iwl4165_2ac_cfg = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 8260",
.fw_name_pre = IWL8000_FW_PRE,
IWL_DEVICE_8260,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};
const struct iwl_cfg iwl8265_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 8265",
.fw_name_pre = IWL8265_FW_PRE,
IWL_DEVICE_8265,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};
const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 4165",
.fw_name_pre = IWL8000_FW_PRE,
IWL_DEVICE_8000,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
.bt_shared_single_ant = true,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX));
@@ -81,28 +81,4 @@ struct iwl_fw_paging_cmd {
__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
/**
* enum iwl_fw_item_id - FW item IDs
*
* @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload /
* download
*/
enum iwl_fw_item_id {
IWL_FW_ITEM_ID_PAGING = 3,
};
/**
* struct iwl_fw_get_item_cmd - get an item from the fw
* @item_id: ID of item to obtain, see &enum iwl_fw_item_id
*/
struct iwl_fw_get_item_cmd {
__le32 item_id;
} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
struct iwl_fw_get_item_resp {
__le32 item_id;
__le32 item_byte_cnt;
__le32 item_val;
} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
#endif /* __iwl_fw_api_paging_h__ */
@@ -136,7 +136,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
IWL_UCODE_TLV_PAGING = 32,
IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35,
/* 35 is unused */
IWL_UCODE_TLV_FW_VERSION = 36,
IWL_UCODE_TLV_FW_DBG_DEST = 38,
IWL_UCODE_TLV_FW_DBG_CONF = 39,
@@ -138,11 +138,6 @@ struct fw_img {
u32 paging_mem_size;
};
struct iwl_sf_region {
u32 addr;
u32 size;
};
/*
* Block paging calculations
*/
@@ -257,7 +252,6 @@ enum iwl_fw_type {
* @type: firmware type (&enum iwl_fw_type)
* @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
* @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
* we get the ALIVE from the uCode
* @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
@@ -290,8 +284,6 @@ struct iwl_fw {
struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
u32 sdio_adma_addr;
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
@@ -87,9 +87,6 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt)
get_order(paging->fw_paging_size));
paging->fw_paging_block = NULL;
}
kfree(fwrt->trans->paging_download_buf);
fwrt->trans->paging_download_buf = NULL;
fwrt->trans->paging_db = NULL;
memset(fwrt->fw_paging_db, 0, sizeof(fwrt->fw_paging_db));
}
@@ -100,13 +97,11 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
{
struct page *block;
dma_addr_t phys = 0;
int blk_idx, order, num_of_pages, size, dma_enabled;
int blk_idx, order, num_of_pages, size;
if (fwrt->fw_paging_db[0].fw_paging_block)
return 0;
dma_enabled = is_device_dma_capable(fwrt->trans->dev);
/* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
@@ -139,24 +134,18 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
fwrt->fw_paging_db[blk_idx].fw_paging_block = block;
fwrt->fw_paging_db[blk_idx].fw_paging_size = size;
if (dma_enabled) {
phys = dma_map_page(fwrt->trans->dev, block, 0,
PAGE_SIZE << order,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(fwrt->trans->dev, phys)) {
/*
* free the previous pages and the current one
* since we failed to map_page.
*/
iwl_free_fw_paging(fwrt);
return -ENOMEM;
}
fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys;
} else {
fwrt->fw_paging_db[blk_idx].fw_paging_phys =
PAGING_ADDR_SIG |
blk_idx << BLOCK_2_EXP_SIZE;
phys = dma_map_page(fwrt->trans->dev, block, 0,
PAGE_SIZE << order,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(fwrt->trans->dev, phys)) {
/*
* free the previous pages and the current one
* since we failed to map_page.
*/
iwl_free_fw_paging(fwrt);
return -ENOMEM;
}
fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys;
if (!blk_idx)
IWL_DEBUG_FW(fwrt,
@@ -312,60 +301,6 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
return iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
/*
* Send paging item cmd to FW in case CPU2 has paging image
*/
static int iwl_trans_get_paging_item(struct iwl_fw_runtime *fwrt)
{
int ret;
struct iwl_fw_get_item_cmd fw_get_item_cmd = {
.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
};
struct iwl_fw_get_item_resp *item_resp;
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
.data = { &fw_get_item_cmd, },
.len = { sizeof(fw_get_item_cmd), },
};
ret = iwl_trans_send_cmd(fwrt->trans, &cmd);
if (ret) {
IWL_ERR(fwrt,
"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
ret);
return ret;
}
item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
IWL_ERR(fwrt,
"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
le32_to_cpu(item_resp->item_id));
ret = -EIO;
goto exit;
}
/* Add an extra page for headers */
fwrt->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
FW_PAGING_SIZE,
GFP_KERNEL);
if (!fwrt->trans->paging_download_buf) {
ret = -ENOMEM;
goto exit;
}
fwrt->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
fwrt->trans->paging_db = fwrt->fw_paging_db;
IWL_DEBUG_FW(fwrt,
"Paging: got paging request address (paging_req_addr 0x%08x)\n",
fwrt->trans->paging_req_addr);
exit:
iwl_free_resp(&cmd);
return ret;
}
int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type)
{
const struct fw_img *fw = &fwrt->fw->img[type];
@@ -382,20 +317,6 @@ int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type)
if (!fw->paging_mem_size)
return 0;
/*
* When dma is not enabled, the driver needs to copy / write
* the downloaded / uploaded page to / from the smem.
* This gets the location of the place where the pages are
* stored.
*/
if (!is_device_dma_capable(fwrt->trans->dev)) {
ret = iwl_trans_get_paging_item(fwrt);
if (ret) {
IWL_ERR(fwrt, "failed to get FW paging item\n");
return ret;
}
}
ret = iwl_save_fw_paging(fwrt, fw);
if (ret) {
IWL_ERR(fwrt, "failed to save the FW paging image\n");
@@ -467,9 +467,6 @@ extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8265_2ac_cfg;
extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
@@ -218,7 +218,6 @@
#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
#define CSR_INT_BIT_PAGING (1 << 24) /* SDIO PAGING */
#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
@@ -229,7 +228,6 @@
CSR_INT_BIT_HW_ERR | \
CSR_INT_BIT_FH_TX | \
CSR_INT_BIT_SW_ERR | \
CSR_INT_BIT_PAGING | \
CSR_INT_BIT_RF_KILL | \
CSR_INT_BIT_SW_RX | \
CSR_INT_BIT_WAKEUP | \
@@ -1039,12 +1039,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
drv->fw.img[usniffer_img].paging_mem_size =
paging_mem_size;
break;
case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
drv->fw.sdio_adma_addr =
le32_to_cpup((__le32 *)tlv_data);
break;
case IWL_UCODE_TLV_FW_GSCAN_CAPA:
/*
* Don't return an error in case of a shorter tlv_len
@@ -398,8 +398,6 @@ struct iwl_hcmd_arr {
* @command_groups: array of command groups, each member is an array of the
* commands in the group; for debugging only
* @command_groups_size: number of command groups, to avoid illegal access
* @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
* we get the ALIVE from the uCode
* @cb_data_offs: offset inside skb->cb to store transport data at, must have
* space for at least two pointers
*/
@@ -419,8 +417,6 @@ struct iwl_trans_config {
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
u32 sdio_adma_addr;
u8 cb_data_offs;
};
@@ -524,6 +520,9 @@ struct iwl_trans_txq_scd_cfg {
* @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
* TX'ed commands and similar. The buffer will be vfree'd by the caller.
* Note that the transport must fill in the proper file headers.
* @dump_regs: dump (via IWL_ERR) the device's configuration space and
* memory-mapped registers to diagnose failure, e.g., when the HW becomes
* inaccessible.
*/
struct iwl_trans_ops {
@@ -531,8 +530,6 @@
void (*op_mode_leave)(struct iwl_trans *iwl_trans);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
bool run_in_rfkill);
int (*update_sf)(struct iwl_trans *trans,
struct iwl_sf_region *st_fwrd_space);
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
void (*stop_device)(struct iwl_trans *trans, bool low_power);
@@ -593,6 +590,8 @@ struct iwl_trans_ops {
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
const struct iwl_fw_dbg_trigger_tlv
*trigger);
void (*dump_regs)(struct iwl_trans *trans);
};
/**
@@ -700,12 +699,6 @@ enum iwl_plat_pm_mode {
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
* @paging_req_addr: The location where the FW will upload / download the pages
* from. The address is set by the opmode
* @paging_db: Pointer to the opmode paging data base, the pointer is set by
* the opmode.
* @paging_download_buf: Buffer used for copying all of the pages before
* downloading them to the FW. The buffer is allocated in the opmode
* @system_pm_mode: the system-wide power management mode in use.
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
@@ -754,14 +747,6 @@ struct iwl_trans {
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u8 dbg_dest_reg_num;
/*
* Paging parameters - All of the parameters should be set by the
* opmode when paging is enabled
*/
u32 paging_req_addr;
struct iwl_fw_paging *paging_db;
void *paging_download_buf;
enum iwl_plat_pm_mode system_pm_mode;
enum iwl_plat_pm_mode runtime_pm_mode;
bool suspending;
@@ -828,17 +813,6 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
return trans->ops->start_fw(trans, fw, run_in_rfkill);
}
static inline int iwl_trans_update_sf(struct iwl_trans *trans,
struct iwl_sf_region *st_fwrd_space)
{
might_sleep();
if (trans->ops->update_sf)
return trans->ops->update_sf(trans, st_fwrd_space);
return 0;
}
static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
bool low_power)
{
@@ -896,6 +870,12 @@ iwl_trans_dump_data(struct iwl_trans *trans,
return trans->ops->dump_data(trans, trigger);
}
static inline void iwl_trans_dump_regs(struct iwl_trans *trans)
{
if (trans->ops->dump_regs)
trans->ops->dump_regs(trans);
}
static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
@@ -196,8 +196,6 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
mvm->error_event_table[1] =
le32_to_cpu(lmac2->error_event_table_ptr);
mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);
umac_error_event_table = le32_to_cpu(umac->error_info_addr);
@@ -266,7 +264,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
int ret, i;
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
static const u16 alive_cmd[] = { MVM_ALIVE };
struct iwl_sf_region st_fwrd_space;
if (ucode_type == IWL_UCODE_REGULAR &&
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
@@ -320,18 +317,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return -EIO;
}
/*
* update the sdio allocation according to the pointer we get in the
* alive notification.
*/
st_fwrd_space.addr = mvm->sf_space.addr;
st_fwrd_space.size = mvm->sf_space.size;
ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
if (ret) {
IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
return ret;
}
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
/*
@@ -4002,39 +4002,36 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
{
if (drop) {
if (iwl_mvm_has_new_tx_api(mvm))
/* TODO new tx api */
WARN_ONCE(1,
"Need to implement flush TX queue\n");
else
iwl_mvm_flush_tx_path(mvm,
iwl_mvm_flushable_queues(mvm) & queues,
0);
} else {
if (iwl_mvm_has_new_tx_api(mvm)) {
struct ieee80211_sta *sta;
int i;
int i;
if (!iwl_mvm_has_new_tx_api(mvm)) {
if (drop) {
mutex_lock(&mvm->mutex);
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
continue;
iwl_mvm_wait_sta_queues_empty(mvm,
iwl_mvm_sta_from_mac80211(sta));
}
iwl_mvm_flush_tx_path(mvm,
iwl_mvm_flushable_queues(mvm) & queues, 0);
mutex_unlock(&mvm->mutex);
} else {
iwl_trans_wait_tx_queues_empty(mvm->trans,
queues);
iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
}
return;
}
mutex_lock(&mvm->mutex);
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
struct ieee80211_sta *sta;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
continue;
if (drop)
iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0);
else
iwl_mvm_wait_sta_queues_empty(mvm,
iwl_mvm_sta_from_mac80211(sta));
}
mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
@@ -4294,9 +4291,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
/* TODO - remove a000 disablement when we have RXQ config API */
if (!iwl_mvm_has_new_rx_api(mvm) ||
mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
if (!iwl_mvm_has_new_rx_api(mvm))
return;
notif->cookie = mvm->queue_sync_cookie;
@@ -4305,6 +4300,13 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
atomic_set(&mvm->queue_sync_counter,
mvm->trans->num_rx_queues);
/* TODO - remove this when we have RXQ config API */
if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) {
qmask = BIT(0);
if (notif->sync)
atomic_set(&mvm->queue_sync_counter, 1);
}
ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
if (ret) {
IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
@@ -652,6 +652,7 @@ struct iwl_mvm_baid_data {
u16 entries_per_queue;
unsigned long last_rx;
struct timer_list session_timer;
struct iwl_mvm_baid_data __rcu **rcu_ptr;
struct iwl_mvm *mvm;
struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
struct iwl_mvm_reorder_buf_entry entries[];
@@ -754,7 +755,6 @@ struct iwl_mvm {
u32 log_event_table;
u32 umac_error_event_table;
bool support_umac_log;
struct iwl_sf_region sf_space;
u32 ampdu_ref;
bool ampdu_toggle;
@@ -1854,7 +1854,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
struct iwl_mvm_internal_rxq_notif *notif,
u32 size);
void iwl_mvm_reorder_timer_expired(unsigned long data);
void iwl_mvm_reorder_timer_expired(struct timer_list *t);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
@@ -703,7 +703,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
driver_data[2]);
trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
/* Set a short watchdog for the command queue */
@@ -67,12 +67,8 @@ static u8 rs_ht_to_legacy[] = {
static const u8 ant_toggle_lookup[] = {
[ANT_NONE] = ANT_NONE,
[ANT_A] = ANT_B,
[ANT_B] = ANT_C,
[ANT_AB] = ANT_BC,
[ANT_C] = ANT_A,
[ANT_AC] = ANT_AB,
[ANT_BC] = ANT_AC,
[ANT_ABC] = ANT_ABC,
[ANT_B] = ANT_A,
[ANT_AB] = ANT_AB,
};
#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
@@ -975,7 +971,7 @@ static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
{
u8 new_ant_type;
if (!rate->ant || rate->ant > ANT_ABC)
if (!rate->ant || WARN_ON_ONCE(rate->ant & ANT_C))
return 0;
if (!rs_is_valid_ant(valid_ant, rate->ant))
@@ -460,9 +460,9 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
}
}
void iwl_mvm_reorder_timer_expired(unsigned long data)
void iwl_mvm_reorder_timer_expired(struct timer_list *t)
{
struct iwl_mvm_reorder_buffer *buf = (void *)data;
struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer);
struct iwl_mvm_baid_data *baid_data =
iwl_mvm_baid_data_from_reorder_buf(buf);
struct iwl_mvm_reorder_buf_entry *entries =
@@ -719,6 +719,22 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
return false;
}
/*
 * Release immediately if there are no stored frames, and the sn is
 * equal to the head.
 * This can happen due to the reorder timer, where NSSN is behind
 * head_sn: after we released everything and then got the next frame
 * in the sequence, the NSSN says we can't release immediately, even
 * though technically there is no hole and we can move forward.
 */
if (!buffer->num_stored && sn == buffer->head_sn) {
if (!amsdu || last_subframe)
buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
/* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock);
return false;
}
index = sn % buffer->buf_size;
/*
@@ -818,6 +834,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct sk_buff *skb;
u8 crypt_len = 0;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
/* Don't use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr pulled.
*/
@@ -252,9 +252,11 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
struct iwl_mvm_baid_data *data =
from_timer(data, t, session_timer);
struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
struct iwl_mvm_baid_data *ba_data;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvm_sta;
@@ -644,8 +646,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
/* Redirect to lower AC */
iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
ssn);
cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
/* Update AC marking of the queue */
spin_lock_bh(&mvm->queue_info_lock);
@@ -1258,6 +1259,14 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
mvm_sta->sta_id,
i, wdg_timeout);
tid_data->txq_id = txq_id;
/*
 * Since we don't set the seq number after reset, and HW
 * sets it now, an FW reset will cause the seq num to start
 * at 0 again, so the driver needs to update it internally
 * as well, to keep it in sync with the real value.
 */
tid_data->seq_number = 0;
} else {
u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
@@ -2153,10 +2162,8 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
reorder_buf->head_sn = ssn;
reorder_buf->buf_size = buf_size;
/* rx reorder timer */
reorder_buf->reorder_timer.function =
iwl_mvm_reorder_timer_expired;
reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
init_timer(&reorder_buf->reorder_timer);
timer_setup(&reorder_buf->reorder_timer,
iwl_mvm_reorder_timer_expired, 0);
spin_lock_init(&reorder_buf->lock);
reorder_buf->mvm = mvm;
reorder_buf->queue = i;
@@ -2279,9 +2286,9 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
baid_data->baid = baid;
baid_data->timeout = timeout;
baid_data->last_rx = jiffies;
setup_timer(&baid_data->session_timer,
iwl_mvm_rx_agg_session_expired,
(unsigned long)&mvm->baid_map[baid]);
baid_data->rcu_ptr = &mvm->baid_map[baid];
timer_setup(&baid_data->session_timer,
iwl_mvm_rx_agg_session_expired, 0);
baid_data->mvm = mvm;
baid_data->tid = tid;
baid_data->sta_id = mvm_sta->sta_id;
@@ -2544,12 +2551,6 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
!= IWL_MAX_TID_COUNT);
if (!mvm->trans->cfg->gen2)
buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
else
buf_size = min_t(int, buf_size,
LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF);
spin_lock_bh(&mvmsta->lock);
ssn = tid_data->ssn;
queue = tid_data->txq_id;
@@ -2561,10 +2562,17 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (iwl_mvm_has_new_tx_api(mvm)) {
/*
* If no queue iwl_mvm_sta_tx_agg_start() would have failed so
* no need to check queue's status
* If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
* would have failed, so if we are here there is no need to
* allocate a queue.
* However, if aggregation size is different than the default
* size, the scheduler should be reconfigured.
* We cannot do this with the new TX API, so return unsupported
* for now, until it is offloaded to firmware.
* Note that if the SCD default value changes, this condition
* should be updated as well.
*/
if (buf_size < mvmsta->max_agg_bufsize)
if (buf_size < IWL_FRAME_LIMIT)
return -ENOTSUPP;
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
@@ -2587,7 +2595,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Only reconfig the SCD for the queue if the window size has
* changed from current (become smaller)
*/
if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
/*
* If reconfiguring an existing queue, it first must be
* drained
@@ -1594,8 +1594,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
mvmsta->tid_data[tid].tx_time =
le16_to_cpu(tx_resp->wireless_media_time);
mvmsta->tid_data[tid].lq_color =
(tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >>
TX_RES_RATE_TABLE_COLOR_POS;
TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
}
rcu_read_unlock();
@@ -467,6 +467,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
@@ -485,6 +487,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
@@ -510,6 +513,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
@@ -581,6 +586,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -88,7 +88,7 @@
#define IWL_FW_MEM_EXTENDED_START 0x40000
#define IWL_FW_MEM_EXTENDED_END 0x57FFF
static void iwl_trans_pcie_err_dump(struct iwl_trans *trans)
static void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE 64
#define PREFIX_LEN 32
@@ -736,7 +736,7 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
trans_pcie->ucode_write_complete, 5 * HZ);
if (!ret) {
IWL_ERR(trans, "Failed to load firmware chunk!\n");
iwl_trans_pcie_err_dump(trans);
iwl_trans_pcie_dump_regs(trans);
return -ETIMEDOUT;
}
@@ -1956,7 +1956,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (unlikely(ret < 0)) {
iwl_trans_pcie_err_dump(trans);
iwl_trans_pcie_dump_regs(trans);
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
WARN_ONCE(1,
"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
@@ -3021,6 +3021,7 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.ref = iwl_trans_pcie_ref, \
.unref = iwl_trans_pcie_unref, \
.dump_data = iwl_trans_pcie_dump_data, \
.dump_regs = iwl_trans_pcie_dump_regs, \
.d3_suspend = iwl_trans_pcie_d3_suspend, \
.d3_resume = iwl_trans_pcie_d3_resume
@@ -1909,6 +1909,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
}
if (test_bit(STATUS_FW_ERROR, &trans->status)) {
iwl_trans_dump_regs(trans);
IWL_ERR(trans, "FW error in SYNC CMD %s\n",
iwl_get_cmd_string(trans, cmd->id));
dump_stack();