Commit 12d56175 authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2018-11-11' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

First set of iwlwifi patches for 4.21

* PCI IDs for some new 9000-series cards;
* Improve antenna usage on connection problems;
* Some improvements in the debugging code;
* Other clean-ups and small fixes;
parents bb38177c 56b657f7
@@ -240,7 +240,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
     if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
         return;

-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) {
         /* Pull RXF1 */
         iwl_fwrt_dump_rxf(fwrt, dump_data,
                           cfg->lmac[0].rxfifo1_size, 0, 0);
@@ -254,7 +254,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
                           LMAC2_PRPH_OFFSET, 2);
     }

-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) {
         /* Pull TXF data from LMAC1 */
         for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
             /* Mark the number of TXF we're pulling now */
@@ -279,7 +279,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
         }
     }

-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
         fw_has_capa(&fwrt->fw->ucode_capa,
                     IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
         /* Pull UMAC internal TXF data from all TXFs */
@@ -603,7 +603,7 @@ static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
     u32 fifo_len = 0;
     int i;

-    if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)))
+    if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF))
         goto dump_txf;

     /* Count RXF2 size */
@@ -614,7 +614,7 @@ static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
         ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);

 dump_txf:
-    if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)))
+    if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF))
         goto dump_internal_txf;

     /* Count TXF sizes */
@@ -627,7 +627,7 @@ static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
     }

 dump_internal_txf:
-    if (!((fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+    if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
           fw_has_capa(&fwrt->fw->ucode_capa,
                       IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
         goto out;
@@ -639,6 +639,32 @@ static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
     return fifo_len;
 }

+static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
+                            struct iwl_fw_error_dump_data **data)
+{
+    int i;
+
+    IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
+    for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
+        struct iwl_fw_error_dump_paging *paging;
+        struct page *pages =
+            fwrt->fw_paging_db[i].fw_paging_block;
+        dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
+
+        (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
+        (*data)->len = cpu_to_le32(sizeof(*paging) +
+                                   PAGING_BLOCK_SIZE);
+        paging = (void *)(*data)->data;
+        paging->index = cpu_to_le32(i);
+        dma_sync_single_for_cpu(fwrt->trans->dev, addr,
+                                PAGING_BLOCK_SIZE,
+                                DMA_BIDIRECTIONAL);
+        memcpy(paging->data, page_address(pages),
+               PAGING_BLOCK_SIZE);
+        (*data) = iwl_fw_error_next_data(*data);
+    }
+}
+
 static struct iwl_fw_error_dump_file *
 _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
                    struct iwl_fw_dump_ptrs *fw_error_dump)
@@ -655,13 +681,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
     u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
     u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
                         0 : fwrt->trans->cfg->dccm2_len;
-    bool monitor_dump_only = false;
     int i;

-    if (fwrt->dump.trig &&
-        fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
-        monitor_dump_only = true;
-
     /* SRAM - include stack CCM if driver knows the values for it */
     if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
         const struct fw_img *img;
@@ -680,22 +701,22 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
         /* Make room for PRPH registers */
         if (!fwrt->trans->cfg->gen2 &&
-            fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH))
+            iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
             prph_len += iwl_fw_get_prph_len(fwrt);

         if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
-            fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
+            iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
             radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
     }

     file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;

-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO))
         file_len += sizeof(*dump_data) + sizeof(*dump_info);

-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG))
         file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);

-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
         size_t hdr_len = sizeof(*dump_data) +
                          sizeof(struct iwl_fw_error_dump_mem);
@@ -712,10 +733,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
     }

     /* Make room for fw's virtual image pages, if it exists */
-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
-        !fwrt->trans->cfg->gen2 &&
-        fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
-        fwrt->fw_paging_db[0].fw_paging_block)
+    if (iwl_fw_dbg_is_paging_enabled(fwrt))
         file_len += fwrt->num_of_paging_blk *
                     (sizeof(*dump_data) +
                      sizeof(struct iwl_fw_error_dump_paging) +
@@ -727,12 +745,12 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
     }

     /* If we only want a monitor dump, reset the file length */
-    if (monitor_dump_only) {
+    if (fwrt->dump.monitor_only) {
         file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
                    sizeof(*dump_info) + sizeof(*dump_smem_cfg);
     }

-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
         fwrt->dump.desc)
         file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
                     fwrt->dump.desc->len;
@@ -746,7 +764,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
     dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
     dump_data = (void *)dump_file->data;

-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
         dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
         dump_data->len = cpu_to_le32(sizeof(*dump_info));
         dump_info = (void *)dump_data->data;
@@ -767,7 +785,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
         dump_data = iwl_fw_error_next_data(dump_data);
     }

-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) {
         /* Dump shared memory configuration */
         dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
         dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
@@ -804,7 +822,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
         iwl_read_radio_regs(fwrt, &dump_data);
     }

-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
         fwrt->dump.desc) {
         dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
         dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
@@ -817,10 +835,10 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
     }

     /* In case we only want monitor dump, skip to dump trasport data */
-    if (monitor_dump_only)
+    if (fwrt->dump.monitor_only)
         goto out;

-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
         const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem =
             fwrt->fw->dbg.mem_tlv;
@@ -865,30 +883,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
     }

     /* Dump fw's virtual image */
-    if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
-        !fwrt->trans->cfg->gen2 &&
-        fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
-        fwrt->fw_paging_db[0].fw_paging_block) {
-        IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
-        for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
-            struct iwl_fw_error_dump_paging *paging;
-            struct page *pages =
-                fwrt->fw_paging_db[i].fw_paging_block;
-            dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
-
-            dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
-            dump_data->len = cpu_to_le32(sizeof(*paging) +
-                                         PAGING_BLOCK_SIZE);
-            paging = (void *)dump_data->data;
-            paging->index = cpu_to_le32(i);
-            dma_sync_single_for_cpu(fwrt->trans->dev, addr,
-                                    PAGING_BLOCK_SIZE,
-                                    DMA_BIDIRECTIONAL);
-            memcpy(paging->data, page_address(pages),
-                   PAGING_BLOCK_SIZE);
-            dump_data = iwl_fw_error_next_data(dump_data);
-        }
-    }
+    if (iwl_fw_dbg_is_paging_enabled(fwrt))
+        iwl_dump_paging(fwrt, &dump_data);

     if (prph_len) {
         iwl_dump_prph(fwrt->trans, &dump_data,
@@ -932,7 +928,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
     }

     fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
-                                                   fwrt->dump.trig);
+                                                   fwrt->dump.monitor_only);
     file_len = le32_to_cpu(dump_file->file_len);
     fw_error_dump->fwrt_len = file_len;
     if (fw_error_dump->trans_ptr) {
@@ -998,7 +994,8 @@ void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
 IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);

 int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
-                            const struct iwl_fw_dump_desc *desc, void *trigger,
+                            const struct iwl_fw_dump_desc *desc,
+                            bool monitor_only,
                             unsigned int delay)
 {
     /*
@@ -1028,7 +1025,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
              le32_to_cpu(desc->trig_desc.type));

     fwrt->dump.desc = desc;
-    fwrt->dump.trig = trigger;
+    fwrt->dump.monitor_only = monitor_only;

     schedule_delayed_work(&fwrt->dump.wk, delay);
@@ -1043,6 +1040,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
 {
     struct iwl_fw_dump_desc *desc;
     unsigned int delay = 0;
+    bool monitor_only = false;

     if (trigger) {
         u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
@@ -1059,6 +1057,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
         trigger->occurrences = cpu_to_le16(occurrences);
         delay = le16_to_cpu(trigger->trig_dis_ms);
+        monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
     }

     desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
@@ -1070,7 +1069,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
     desc->trig_desc.type = cpu_to_le32(trig);
     memcpy(desc->trig_desc.data, str, len);

-    return iwl_fw_dbg_collect_desc(fwrt, desc, trigger, delay);
+    return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
...
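Note: the dbg.c hunks above funnel every open-coded dump-mask test through one predicate and replace the stored trigger TLV pointer with a plain flag. A minimal sketch of the resulting call pattern, assembled from lines in this diff (illustrative fragment, not a standalone compilable unit):

    /* Gate each dump section on the dump mask via the new helper. */
    if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO))
        file_len += sizeof(*dump_data) + sizeof(*dump_info);

    /* The monitor-only decision is made once, then carried as a bool: */
    monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
    iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);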
@@ -101,13 +101,12 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
     if (fwrt->dump.desc != &iwl_dump_desc_assert)
         kfree(fwrt->dump.desc);
     fwrt->dump.desc = NULL;
-    fwrt->dump.trig = NULL;
 }

 void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
 int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
                             const struct iwl_fw_dump_desc *desc,
-                            void *trigger, unsigned int delay);
+                            bool monitor_only, unsigned int delay);
 int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
                        enum iwl_fw_dbg_trigger trig,
                        const char *str, size_t len,
@@ -310,12 +309,25 @@ static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)

 void iwl_fw_error_dump_wk(struct work_struct *work);

+static inline bool iwl_fw_dbg_type_on(struct iwl_fw_runtime *fwrt, u32 type)
+{
+    return (fwrt->fw->dbg.dump_mask & BIT(type));
+}
+
 static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
 {
     return fw_has_capa(&fwrt->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
            fwrt->trans->cfg->d3_debug_data_length &&
-           fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+           iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+}
+
+static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt)
+{
+    return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) &&
+           !fwrt->trans->cfg->gen2 &&
+           fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
+           fwrt->fw_paging_db[0].fw_paging_block;
 }

 void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
...
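Note: iwl_fw_dbg_is_paging_enabled() above folds the four paging preconditions (dump-mask bit set, not a gen2 device, a paging-capable image, an allocated paging block) into one predicate, so the sizing pass and the emission pass in dbg.c can no longer drift apart. Both call sites reduce to the same shape (illustrative fragment, names taken from this diff):

    if (iwl_fw_dbg_is_paging_enabled(fwrt))
        iwl_dump_paging(fwrt, &dump_data);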
@@ -131,7 +131,7 @@ struct iwl_fw_runtime {
     /* debug */
     struct {
         const struct iwl_fw_dump_desc *desc;
-        const struct iwl_fw_dbg_trigger_tlv *trig;
+        bool monitor_only;
         struct delayed_work wk;

         u8 conf;
...
@@ -8,6 +8,7 @@
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -394,6 +396,7 @@ enum aux_misc_master1_en {
 #define AUX_MISC_MASTER1_SMPHR_STATUS	0xA20800
 #define RSA_ENABLE			0xA24B08
 #define PREG_AUX_BUS_WPROT_0		0xA04CC0
+#define PREG_PRPH_WPROT_0		0xA04CE0
 #define SB_CPU_1_STATUS			0xA01E30
 #define SB_CPU_2_STATUS			0xA01E34
 #define UMAG_SB_CPU_1_STATUS		0xA038C0
@@ -420,4 +423,8 @@ enum {
 #define UREG_CHICK		(0xA05C00)
 #define UREG_CHICK_MSI_ENABLE	BIT(24)
 #define UREG_CHICK_MSIX_ENABLE	BIT(25)
+#define HPM_DEBUG			0xA03440
+#define PERSISTENCE_BIT			BIT(12)
+#define PREG_WFPM_ACCESS		BIT(12)
 #endif /* __iwl_prph_h__ */
...
@@ -602,8 +602,7 @@ struct iwl_trans_ops {
     void (*resume)(struct iwl_trans *trans);

     struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
-                                              const struct iwl_fw_dbg_trigger_tlv
-                                              *trigger);
+                                              bool monitor_only);
 };

 /**
@@ -897,12 +896,11 @@ static inline void iwl_trans_resume(struct iwl_trans *trans)
 }

 static inline struct iwl_trans_dump_data *
-iwl_trans_dump_data(struct iwl_trans *trans,
-                    const struct iwl_fw_dbg_trigger_tlv *trigger)
+iwl_trans_dump_data(struct iwl_trans *trans, bool monitor_only)
 {
     if (!trans->ops->dump_data)
         return NULL;
-    return trans->ops->dump_data(trans, trigger);
+    return trans->ops->dump_data(trans, monitor_only);
 }

 static inline struct iwl_device_cmd *
...
@@ -1956,7 +1956,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
         set_bit(STATUS_FW_ERROR, &mvm->trans->status);
         iwl_mvm_dump_nic_error_log(mvm);
         iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
-                                NULL, 0);
+                                false, 0);
         ret = 1;
         goto err;
     }
...
@@ -1299,10 +1299,11 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
     int len;

     len = scnprintf(buf, sizeof(buf) - 1,
-                    "traffic=%d\ndbgfs=%d\nvcmd=%d\n",
+                    "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n",
                     !!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC),
                     !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS),
-                    !!(mvmvif->low_latency & LOW_LATENCY_VCMD));
+                    !!(mvmvif->low_latency & LOW_LATENCY_VCMD),
+                    !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE));

     return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
@@ -1440,15 +1441,6 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
     return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }

-static const char * const chanwidths[] = {
-    [NL80211_CHAN_WIDTH_20_NOHT] = "noht",
-    [NL80211_CHAN_WIDTH_20] = "ht20",
-    [NL80211_CHAN_WIDTH_40] = "ht40",
-    [NL80211_CHAN_WIDTH_80] = "vht80",
-    [NL80211_CHAN_WIDTH_80P80] = "vht80p80",
-    [NL80211_CHAN_WIDTH_160] = "vht160",
-};
-
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
     _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
...
@@ -965,11 +965,8 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
     tx->tx_flags = cpu_to_le32(tx_flags);

     if (!fw_has_capa(&mvm->fw->ucode_capa,
-                     IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
-        mvm->mgmt_last_antenna_idx =
-            iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
-                                 mvm->mgmt_last_antenna_idx);
-    }
+                     IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION))
+        iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);

     tx->rate_n_flags =
         cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
...
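Note: iwl_mvm_toggle_tx_ant() (added to mvm.h further down) is an extraction of the round-robin idiom repeated in mac-ctxt.c, scan.c, ops.c and sta.c; the two forms are equivalent (sketch, using only names from this diff):

    /* Open-coded form being removed: */
    mvm->mgmt_last_antenna_idx =
        iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
                             mvm->mgmt_last_antenna_idx);

    /* Helper form being introduced: */
    iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);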
@@ -809,6 +809,21 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
         !ieee80211_is_bufferable_mmpdu(hdr->frame_control))
         sta = NULL;

+    /* If there is no sta, and it's not offchannel - send through AP */
+    if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+        info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
+        struct iwl_mvm_vif *mvmvif =
+            iwl_mvm_vif_from_mac80211(info->control.vif);
+        u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
+
+        if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+            /* mac80211 holds rcu read lock */
+            sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
+            if (IS_ERR_OR_NULL(sta))
+                goto drop;
+        }
+    }
+
     if (sta) {
         if (iwl_mvm_defer_tx(mvm, sta, skb))
             return;
@@ -2379,6 +2394,12 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
     /* must be set before quota calculations */
     mvmvif->ap_ibss_active = true;

+    if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
+        iwl_mvm_vif_set_low_latency(mvmvif, true,
+                                    LOW_LATENCY_VIF_TYPE);
+        iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id);
+    }
+
     /* power updated needs to be done before quotas */
     iwl_mvm_power_update_mac(mvm);
@@ -2441,6 +2462,12 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
     mvmvif->ap_ibss_active = false;
     mvm->ap_last_beacon_gp2 = 0;

+    if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
+        iwl_mvm_vif_set_low_latency(mvmvif, false,
+                                    LOW_LATENCY_VIF_TYPE);
+        iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id);
+    }
+
     iwl_mvm_bt_coex_vif_change(mvm);

     iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
...
@@ -303,11 +303,13 @@ enum iwl_bt_force_ant_mode {
  * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected
  * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs
  * @LOW_LATENCY_VCMD: low latency mode set from vendor command
+ * @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap)
  */
 enum iwl_mvm_low_latency_cause {
     LOW_LATENCY_TRAFFIC = BIT(0),
     LOW_LATENCY_DEBUGFS = BIT(1),
     LOW_LATENCY_VCMD = BIT(2),
+    LOW_LATENCY_VIF_TYPE = BIT(3),
 };

 /**
@@ -844,7 +846,6 @@ struct iwl_mvm {
     u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];

     struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
-    spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
     struct work_struct add_stream_wk; /* To add streams to queues */

     atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
@@ -1521,6 +1522,11 @@ static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
            mvm->fw->valid_rx_ant;
 }

+static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant)
+{
+    *ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), *ant);
+}
+
 static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm)
 {
     u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
@@ -1846,6 +1852,8 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 /* get SystemLowLatencyMode - only needed for beacon threshold? */
 bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
 bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band);
+void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency,
+                                  u16 mac_id);

 /* get VMACLowLatencyMode */
 static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
...
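Note: with the fourth cause bit added above, a vif counts as low latency whenever any cause bit is set, and the AP path in mac80211.c sets and clears only its own LOW_LATENCY_VIF_TYPE bit. A hypothetical illustration of how the causes compose (the real predicate is iwl_mvm_vif_low_latency() in mvm.h; this named function is not part of the diff):

    /* Hypothetical sketch: any set cause bit makes the vif low latency. */
    static inline bool example_vif_low_latency(struct iwl_mvm_vif *mvmvif)
    {
        return mvmvif->low_latency != 0;
    }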
@@ -676,7 +676,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
     INIT_LIST_HEAD(&mvm->aux_roc_te_list);
     INIT_LIST_HEAD(&mvm->async_handlers_list);
     spin_lock_init(&mvm->time_event_lock);
-    spin_lock_init(&mvm->queue_info_lock);

     INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
     INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
@@ -846,6 +845,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
     iwl_mvm_tof_init(mvm);

+    iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+
     return op_mode;

 out_unregister:
@@ -1108,11 +1109,7 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
     struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-    unsigned long mq;
-
-    spin_lock_bh(&mvm->queue_info_lock);
-    mq = mvm->hw_queue_to_mac80211[hw_queue];
-    spin_unlock_bh(&mvm->queue_info_lock);
+    unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];

     iwl_mvm_stop_mac_queues(mvm, mq);
 }
@@ -1138,11 +1135,7 @@ void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
     struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-    unsigned long mq;
-
-    spin_lock_bh(&mvm->queue_info_lock);
-    mq = mvm->hw_queue_to_mac80211[hw_queue];
-    spin_unlock_bh(&mvm->queue_info_lock);
+    unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];

     iwl_mvm_start_mac_queues(mvm, mq);
 }
@@ -1240,7 +1233,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
      */
     if (!mvm->fw_restart && fw_error) {
         iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
-                                NULL, 0);
+                                false, 0);
     } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
         struct iwl_mvm_reprobe *reprobe;
...
@@ -98,8 +98,12 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
 {
     struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
     struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+    struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
     u8 supp = 0;

+    if (he_cap && he_cap->has_he)
+        return 0;
+
     if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
         supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
     if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
...
@@ -1422,12 +1422,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
     /* update aggregation data for monitor sake on default queue */
     if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
         bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
-        u64 he_phy_data;
-
-        if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-            he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
-        else
-            he_phy_data = le64_to_cpu(desc->v1.he_phy_data);

         rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
         rx_status->ampdu_reference = mvm->ampdu_ref;
...
@@ -205,9 +205,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
 {
     u32 tx_ant;

-    mvm->scan_last_antenna_idx =
-        iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
-                             mvm->scan_last_antenna_idx);
+    iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx);
     tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;

     if (band == NL80211_BAND_2GHZ && !no_cck)
...
...@@ -319,9 +319,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, ...@@ -319,9 +319,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL; return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id; sta_id = mvm->queue_info[queue].ra_sta_id;
spin_unlock_bh(&mvm->queue_info_lock);
rcu_read_lock(); rcu_read_lock();
...@@ -372,25 +370,17 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, ...@@ -372,25 +370,17 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
return -EINVAL; return -EINVAL;
if (iwl_mvm_has_new_tx_api(mvm)) { if (iwl_mvm_has_new_tx_api(mvm)) {
spin_lock_bh(&mvm->queue_info_lock);
if (remove_mac_queue) if (remove_mac_queue)
mvm->hw_queue_to_mac80211[queue] &= mvm->hw_queue_to_mac80211[queue] &=
~BIT(mac80211_queue); ~BIT(mac80211_queue);
spin_unlock_bh(&mvm->queue_info_lock);
iwl_trans_txq_free(mvm->trans, queue); iwl_trans_txq_free(mvm->trans, queue);
return 0; return 0;
} }
spin_lock_bh(&mvm->queue_info_lock); if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
spin_unlock_bh(&mvm->queue_info_lock);
return 0; return 0;
}
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
...@@ -426,10 +416,8 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, ...@@ -426,10 +416,8 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
mvm->hw_queue_to_mac80211[queue]); mvm->hw_queue_to_mac80211[queue]);
/* If the queue is still enabled - nothing left to do in this func */ /* If the queue is still enabled - nothing left to do in this func */
if (cmd.action == SCD_CFG_ENABLE_QUEUE) { if (cmd.action == SCD_CFG_ENABLE_QUEUE)
spin_unlock_bh(&mvm->queue_info_lock);
return 0; return 0;
}
cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tid = mvm->queue_info[queue].txq_tid; cmd.tid = mvm->queue_info[queue].txq_tid;
...@@ -448,8 +436,6 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, ...@@ -448,8 +436,6 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
/* Regardless if this is a reserved TXQ for a STA - mark it as false */ /* Regardless if this is a reserved TXQ for a STA - mark it as false */
mvm->queue_info[queue].reserved = false; mvm->queue_info[queue].reserved = false;
spin_unlock_bh(&mvm->queue_info_lock);
iwl_trans_txq_disable(mvm->trans, queue, false); iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
...@@ -474,10 +460,8 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) ...@@ -474,10 +460,8 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL; return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id; sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap; tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex)); lockdep_is_held(&mvm->mutex));
...@@ -516,10 +500,8 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) ...@@ -516,10 +500,8 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL; return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id; sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap; tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
rcu_read_lock(); rcu_read_lock();
...@@ -545,6 +527,16 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) ...@@ -545,6 +527,16 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
rcu_read_unlock(); rcu_read_unlock();
/*
* The TX path may have been using this TXQ_ID from the tid_data,
* so make sure it's no longer running so that we can safely reuse
* this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
* above, but nothing guarantees we've stopped using them. Thus,
* without this, we could get to iwl_mvm_disable_txq() and remove
* the queue while still sending frames to it.
*/
synchronize_net();
return disable_agg_tids; return disable_agg_tids;
} }
...@@ -562,11 +554,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, ...@@ -562,11 +554,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL; return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
txq_curr_ac = mvm->queue_info[queue].mac80211_ac; txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id; sta_id = mvm->queue_info[queue].ra_sta_id;
tid = mvm->queue_info[queue].txq_tid; tid = mvm->queue_info[queue].txq_tid;
spin_unlock_bh(&mvm->queue_info_lock);
same_sta = sta_id == new_sta_id; same_sta = sta_id == new_sta_id;
...@@ -610,7 +600,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, ...@@ -610,7 +600,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
* by the inactivity checker. * by the inactivity checker.
*/ */
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
lockdep_assert_held(&mvm->queue_info_lock);
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL; return -EINVAL;
...@@ -696,10 +685,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, ...@@ -696,10 +685,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
* value 3 and VO with value 0, so to check if ac X is lower than ac Y * value 3 and VO with value 0, so to check if ac X is lower than ac Y
* we need to check if the numerical value of X is LARGER than of Y. * we need to check if the numerical value of X is LARGER than of Y.
*/ */
spin_lock_bh(&mvm->queue_info_lock);
if (ac <= mvm->queue_info[queue].mac80211_ac && !force) { if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, IWL_DEBUG_TX_QUEUES(mvm,
"No redirection needed on TXQ #%d\n", "No redirection needed on TXQ #%d\n",
queue); queue);
...@@ -711,7 +697,6 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, ...@@ -711,7 +697,6 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.tid = mvm->queue_info[queue].txq_tid; cmd.tid = mvm->queue_info[queue].txq_tid;
mq = mvm->hw_queue_to_mac80211[queue]; mq = mvm->hw_queue_to_mac80211[queue];
shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
queue, iwl_mvm_ac_to_tx_fifo[ac]); queue, iwl_mvm_ac_to_tx_fifo[ac]);
...@@ -737,9 +722,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, ...@@ -737,9 +722,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
/* Update the TID "owner" of the queue */ /* Update the TID "owner" of the queue */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].txq_tid = tid; mvm->queue_info[queue].txq_tid = tid;
spin_unlock_bh(&mvm->queue_info_lock);
/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
...@@ -748,9 +731,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, ...@@ -748,9 +731,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn); cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
/* Update AC marking of the queue */ /* Update AC marking of the queue */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].mac80211_ac = ac; mvm->queue_info[queue].mac80211_ac = ac;
spin_unlock_bh(&mvm->queue_info_lock);
/* /*
* Mark queue as shared in transport if shared * Mark queue as shared in transport if shared
...@@ -773,7 +754,7 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, ...@@ -773,7 +754,7 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
{ {
int i; int i;
lockdep_assert_held(&mvm->queue_info_lock); lockdep_assert_held(&mvm->mutex);
/* This should not be hit with new TX path */ /* This should not be hit with new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
...@@ -853,11 +834,8 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, ...@@ -853,11 +834,8 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
{ {
bool enable_queue = true; bool enable_queue = true;
spin_lock_bh(&mvm->queue_info_lock);
/* Make sure this TID isn't already enabled */ /* Make sure this TID isn't already enabled */
if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
queue, tid); queue, tid);
return false; return false;
...@@ -893,8 +871,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, ...@@ -893,8 +871,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
queue, mvm->queue_info[queue].tid_bitmap, queue, mvm->queue_info[queue].tid_bitmap,
mvm->hw_queue_to_mac80211[queue]); mvm->hw_queue_to_mac80211[queue]);
spin_unlock_bh(&mvm->queue_info_lock);
return enable_queue; return enable_queue;
} }
...@@ -949,9 +925,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) ...@@ -949,9 +925,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return; return;
spin_lock_bh(&mvm->queue_info_lock);
tid_bitmap = mvm->queue_info[queue].tid_bitmap; tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
return; return;
...@@ -968,9 +942,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) ...@@ -968,9 +942,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
return; return;
} }
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].txq_tid = tid; mvm->queue_info[queue].txq_tid = tid;
spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
queue, tid); queue, tid);
} }
...@@ -992,10 +964,8 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) ...@@ -992,10 +964,8 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id; sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap; tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
/* Find TID for queue, and make sure it is the only one on the queue */ /* Find TID for queue, and make sure it is the only one on the queue */
tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
...@@ -1052,9 +1022,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) ...@@ -1052,9 +1022,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
} }
} }
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
} }
/* /*
...@@ -1073,7 +1041,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, ...@@ -1073,7 +1041,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
int tid; int tid;
lockdep_assert_held(&mvmsta->lock); lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->queue_info_lock); lockdep_assert_held(&mvm->mutex);
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return false; return false;
...@@ -1174,8 +1142,6 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) ...@@ -1174,8 +1142,6 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
if (iwl_mvm_has_new_tx_api(mvm)) if (iwl_mvm_has_new_tx_api(mvm))
return -ENOSPC; return -ENOSPC;
spin_lock_bh(&mvm->queue_info_lock);
rcu_read_lock(); rcu_read_lock();
/* we skip the CMD queue below by starting at 1 */ /* we skip the CMD queue below by starting at 1 */
...@@ -1230,12 +1196,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) ...@@ -1230,12 +1196,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* this isn't so nice, but works OK due to the way we loop */ spin_lock_bh(&mvmsta->lock);
spin_unlock(&mvm->queue_info_lock);
/* and we need this locking order */
spin_lock(&mvmsta->lock);
spin_lock(&mvm->queue_info_lock);
ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
inactive_tid_bitmap, inactive_tid_bitmap,
&unshare_queues, &unshare_queues,
...@@ -1243,11 +1204,10 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) ...@@ -1243,11 +1204,10 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
if (ret >= 0 && free_queue < 0) if (ret >= 0 && free_queue < 0)
free_queue = ret; free_queue = ret;
/* only unlock sta lock - we still need the queue info lock */ /* only unlock sta lock - we still need the queue info lock */
spin_unlock(&mvmsta->lock); spin_unlock_bh(&mvmsta->lock);
} }
rcu_read_unlock(); rcu_read_unlock();
spin_unlock_bh(&mvm->queue_info_lock);
/* Reconfigure queues requiring reconfiguation */ /* Reconfigure queues requiring reconfiguation */
for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
...@@ -1296,8 +1256,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, ...@@ -1296,8 +1256,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
tfd_queue_mask = mvmsta->tfd_queue_msk; tfd_queue_mask = mvmsta->tfd_queue_msk;
spin_unlock_bh(&mvmsta->lock); spin_unlock_bh(&mvmsta->lock);
spin_lock_bh(&mvm->queue_info_lock);
/* /*
* Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
* exists * exists
...@@ -1327,12 +1285,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, ...@@ -1327,12 +1285,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE); IWL_MVM_DQA_MAX_DATA_QUEUE);
if (queue < 0) { if (queue < 0) {
spin_unlock_bh(&mvm->queue_info_lock);
/* try harder - perhaps kill an inactive queue */ /* try harder - perhaps kill an inactive queue */
queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
spin_lock_bh(&mvm->queue_info_lock);
} }
/* No free queue - we'll have to share */ /* No free queue - we'll have to share */
...@@ -1353,8 +1307,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, ...@@ -1353,8 +1307,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (queue > 0 && !shared_queue) if (queue > 0 && !shared_queue)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
/* This shouldn't happen - out of queues */ /* This shouldn't happen - out of queues */
if (WARN_ON(queue <= 0)) { if (WARN_ON(queue <= 0)) {
IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
...@@ -1556,8 +1508,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, ...@@ -1556,8 +1508,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
/* run the general cleanup/unsharing of queues */ /* run the general cleanup/unsharing of queues */
iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
spin_lock_bh(&mvm->queue_info_lock);
/* Make sure we have free resources for this STA */ /* Make sure we have free resources for this STA */
if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
!mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap && !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
...@@ -1569,19 +1519,15 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, ...@@ -1569,19 +1519,15 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE); IWL_MVM_DQA_MAX_DATA_QUEUE);
if (queue < 0) { if (queue < 0) {
spin_unlock_bh(&mvm->queue_info_lock);
/* try again - this time kick out a queue if needed */ /* try again - this time kick out a queue if needed */
queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
if (queue < 0) { if (queue < 0) {
IWL_ERR(mvm, "No available queues for new station\n"); IWL_ERR(mvm, "No available queues for new station\n");
return -ENOSPC; return -ENOSPC;
} }
spin_lock_bh(&mvm->queue_info_lock);
} }
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
spin_unlock_bh(&mvm->queue_info_lock);
mvmsta->reserved_queue = queue; mvmsta->reserved_queue = queue;
IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
...@@ -1822,6 +1768,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, ...@@ -1822,6 +1768,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
if (iwl_mvm_has_tlc_offload(mvm)) if (iwl_mvm_has_tlc_offload(mvm))
iwl_mvm_rs_add_sta(mvm, mvm_sta); iwl_mvm_rs_add_sta(mvm, mvm_sta);
iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
update_fw: update_fw:
ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
if (ret) if (ret)
...@@ -2004,18 +1952,14 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, ...@@ -2004,18 +1952,14 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* is still marked as IWL_MVM_QUEUE_RESERVED, and * is still marked as IWL_MVM_QUEUE_RESERVED, and
* should be manually marked as free again * should be manually marked as free again
*/ */
spin_lock_bh(&mvm->queue_info_lock);
status = &mvm->queue_info[reserved_txq].status; status = &mvm->queue_info[reserved_txq].status;
if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
(*status != IWL_MVM_QUEUE_FREE), (*status != IWL_MVM_QUEUE_FREE),
"sta_id %d reserved txq %d status %d", "sta_id %d reserved txq %d status %d",
sta_id, reserved_txq, *status)) { sta_id, reserved_txq, *status))
spin_unlock_bh(&mvm->queue_info_lock);
return -EINVAL; return -EINVAL;
}
*status = IWL_MVM_QUEUE_FREE; *status = IWL_MVM_QUEUE_FREE;
spin_unlock_bh(&mvm->queue_info_lock);
} }
if (vif->type == NL80211_IFTYPE_STATION && if (vif->type == NL80211_IFTYPE_STATION &&
...@@ -2873,8 +2817,6 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -2873,8 +2817,6 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO; return -EIO;
} }
spin_lock(&mvm->queue_info_lock);
/* /*
* Note the possible cases: * Note the possible cases:
* 1. An enabled TXQ - TXQ needs to become agg'ed * 1. An enabled TXQ - TXQ needs to become agg'ed
...@@ -2889,7 +2831,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -2889,7 +2831,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (txq_id < 0) { if (txq_id < 0) {
ret = txq_id; ret = txq_id;
IWL_ERR(mvm, "Failed to allocate agg queue\n"); IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto release_locks; goto out;
} }
/* TXQ hasn't yet been enabled, so mark it only as reserved */ /* TXQ hasn't yet been enabled, so mark it only as reserved */
...@@ -2900,11 +2842,9 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -2900,11 +2842,9 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_DEBUG_TX_QUEUES(mvm, IWL_DEBUG_TX_QUEUES(mvm,
"Can't start tid %d agg on shared queue!\n", "Can't start tid %d agg on shared queue!\n",
tid); tid);
goto release_locks; goto out;
} }
spin_unlock(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, IWL_DEBUG_TX_QUEUES(mvm,
"AGG for tid %d will be on queue #%d\n", "AGG for tid %d will be on queue #%d\n",
tid, txq_id); tid, txq_id);
...@@ -2935,10 +2875,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -2935,10 +2875,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
} }
ret = 0; ret = 0;
goto out;
release_locks:
spin_unlock(&mvm->queue_info_lock);
out: out:
spin_unlock_bh(&mvmsta->lock); spin_unlock_bh(&mvmsta->lock);
...@@ -3007,9 +2944,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -3007,9 +2944,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
spin_lock_bh(&mvm->queue_info_lock);
queue_status = mvm->queue_info[queue].status; queue_status = mvm->queue_info[queue].status;
spin_unlock_bh(&mvm->queue_info_lock);
/* Maybe there is no need to even alloc a queue... */ /* Maybe there is no need to even alloc a queue... */
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
...@@ -3055,9 +2990,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -3055,9 +2990,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
} }
/* No need to mark as reserved */ /* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
out: out:
/* /*
...@@ -3083,10 +3016,11 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, ...@@ -3083,10 +3016,11 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
{ {
u16 txq_id = tid_data->txq_id; u16 txq_id = tid_data->txq_id;
lockdep_assert_held(&mvm->mutex);
if (iwl_mvm_has_new_tx_api(mvm)) if (iwl_mvm_has_new_tx_api(mvm))
return; return;
spin_lock_bh(&mvm->queue_info_lock);
/* /*
* The TXQ is marked as reserved only if no traffic came through yet * The TXQ is marked as reserved only if no traffic came through yet
* This means no traffic has been sent on this TID (agg'd or not), so * This means no traffic has been sent on this TID (agg'd or not), so
...@@ -3098,8 +3032,6 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, ...@@ -3098,8 +3032,6 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
tid_data->txq_id = IWL_MVM_INVALID_QUEUE; tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
} }
spin_unlock_bh(&mvm->queue_info_lock);
} }
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
......
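Editor's note: the sta.c hunks above all gate on the same queue-status lifecycle, now serialized by mvm->mutex instead of the removed queue_info_lock. A minimal sketch of that invariant, assuming the enum members shown in the WARN above (the helper itself is hypothetical and not part of the patch):

	/* Hypothetical helper, for illustration only: FREE -> RESERVED on
	 * allocation, RESERVED -> READY once traffic flows, and back to
	 * FREE on sta removal.  Freeing is only legal from RESERVED or
	 * FREE, which is exactly what the WARN above checks.
	 */
	static bool iwl_mvm_queue_reusable(enum iwl_mvm_queue_status s)
	{
		return s == IWL_MVM_QUEUE_RESERVED || s == IWL_MVM_QUEUE_FREE;
	}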
...@@ -397,6 +397,9 @@ struct iwl_mvm_rxq_dup_data { ...@@ -397,6 +397,9 @@ struct iwl_mvm_rxq_dup_data {
* @ptk_pn: per-queue PTK PN data structures * @ptk_pn: per-queue PTK PN data structures
* @dup_data: per queue duplicate packet detection data * @dup_data: per queue duplicate packet detection data
* @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
* @tx_ant: the index of the antenna to use for data tx to this station. Only
* used during connection establishment (e.g. for the 4 way handshake
* exchange).
* *
* When mac80211 creates a station it reserves some space (hw->sta_data_size) * When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that * in the structure for use by driver. This structure is placed in that
...@@ -439,6 +442,7 @@ struct iwl_mvm_sta { ...@@ -439,6 +442,7 @@ struct iwl_mvm_sta {
u8 agg_tids; u8 agg_tids;
u8 sleep_tx_count; u8 sleep_tx_count;
u8 avg_energy; u8 avg_energy;
u8 tx_ant;
}; };
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data); u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
......
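The @tx_ant field documented above is driven by iwl_mvm_toggle_tx_ant(), which the sta.c and tx.c hunks call but whose body falls outside this excerpt. A minimal sketch, assuming it simply steps to the next valid antenna via iwl_mvm_next_antenna() (whose signature appears in the utils.c hunk further down):

	/* Sketch only; the real body is not shown in this diff. */
	static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant)
	{
		/* Advance to the next valid TX antenna; called on add-sta
		 * and whenever a frame fails before the station is
		 * authorized.
		 */
		*ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
					    *ant);
	}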
...@@ -302,13 +302,30 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -302,13 +302,30 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
offload_assist)); offload_assist));
} }
static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc)
{
if (info->band == NL80211_BAND_2GHZ &&
!iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
if (sta && ieee80211_is_data(fc)) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
}
return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
}
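For orientation, the value returned above is a one-hot antenna mask shifted into the rate_n_flags word; a hedged illustration (the index is hypothetical, BIT() and RATE_MCS_ANT_POS are as used in this file):

	/* Illustration only: how a per-station antenna index becomes the
	 * antenna bits that iwl_mvm_get_tx_rate_n_flags() ORs into
	 * rate_n_flags below.
	 */
	static u32 example_ant_bits(void)
	{
		u8 tx_ant = 1;				/* say, antenna B */
		return BIT(tx_ant) << RATE_MCS_ANT_POS;	/* one-hot, shifted */
	}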
static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta) struct ieee80211_sta *sta)
{ {
int rate_idx; int rate_idx;
u8 rate_plcp; u8 rate_plcp;
u32 rate_flags; u32 rate_flags = 0;
/* HT rate doesn't make sense for a non data frame */ /* HT rate doesn't make sense for a non data frame */
WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
...@@ -332,13 +349,6 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, ...@@ -332,13 +349,6 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
/* Get PLCP rate for tx_cmd->rate_n_flags */ /* Get PLCP rate for tx_cmd->rate_n_flags */
rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
if (info->band == NL80211_BAND_2GHZ &&
!iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
else
rate_flags =
BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
/* Set CCK flag as needed */ /* Set CCK flag as needed */
if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
rate_flags |= RATE_MCS_CCK_MSK; rate_flags |= RATE_MCS_CCK_MSK;
...@@ -346,6 +356,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, ...@@ -346,6 +356,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
return (u32)rate_plcp | rate_flags; return (u32)rate_plcp | rate_flags;
} }
static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc)
{
return iwl_mvm_get_tx_rate(mvm, info, sta) |
iwl_mvm_get_tx_ant(mvm, info, sta, fc);
}
/* /*
* Sets the fields in the Tx cmd that are rate related * Sets the fields in the Tx cmd that are rate related
*/ */
...@@ -373,20 +391,21 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, ...@@ -373,20 +391,21 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
*/ */
if (ieee80211_is_data(fc) && sta) { if (ieee80211_is_data(fc) && sta) {
tx_cmd->initial_rate_index = 0; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
return; if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
return;
}
} else if (ieee80211_is_back_req(fc)) { } else if (ieee80211_is_back_req(fc)) {
tx_cmd->tx_flags |= tx_cmd->tx_flags |=
cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
} }
mvm->mgmt_last_antenna_idx =
iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
mvm->mgmt_last_antenna_idx);
/* Set the rate in the TX cmd */ /* Set the rate in the TX cmd */
tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta)); tx_cmd->rate_n_flags =
cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
} }
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
...@@ -491,6 +510,8 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -491,6 +510,8 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 offload_assist = 0; u16 offload_assist = 0;
u32 rate_n_flags = 0; u32 rate_n_flags = 0;
u16 flags = 0; u16 flags = 0;
struct iwl_mvm_sta *mvmsta = sta ?
iwl_mvm_sta_from_mac80211(sta) : NULL;
if (ieee80211_is_data_qos(hdr->frame_control)) { if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr); u8 *qc = ieee80211_get_qos_ctl(hdr);
...@@ -510,10 +531,16 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -510,10 +531,16 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
if (!info->control.hw_key) if (!info->control.hw_key)
flags |= IWL_TX_FLAGS_ENCRYPT_DIS; flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
/* For data packets rate info comes from the fw */ /*
if (!(ieee80211_is_data(hdr->frame_control) && sta)) { * For data packets rate info comes from the fw. Only
* set rate/antenna during connection establishment.
*/
if (sta && (!ieee80211_is_data(hdr->frame_control) ||
mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) {
flags |= IWL_TX_FLAGS_CMD_RATE; flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta); rate_n_flags =
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
hdr->frame_control);
} }
if (mvm->trans->cfg->device_family >= if (mvm->trans->cfg->device_family >=
...@@ -1160,11 +1187,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, ...@@ -1160,11 +1187,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
* If we have timed-out TIDs - schedule the worker that will * If we have timed-out TIDs - schedule the worker that will
* reconfig the queues and update them * reconfig the queues and update them
* *
* Note that the mvm->queue_info_lock isn't being taken here in * Note that no lock is taken here in order to not serialize
* order to not serialize the TX flow. This isn't dangerous * the TX flow. This isn't dangerous because scheduling
* because scheduling mvm->add_stream_wk can't ruin the state, * mvm->add_stream_wk can't ruin the state, and if we DON'T
* and if we DON'T schedule it due to some race condition then * schedule it due to some race condition then next TX we get
* next TX we get here we will. * here we will.
*/ */
if (unlikely(mvm->queue_info[txq_id].status == if (unlikely(mvm->queue_info[txq_id].status ==
IWL_MVM_QUEUE_SHARED && IWL_MVM_QUEUE_SHARED &&
...@@ -1501,6 +1528,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, ...@@ -1501,6 +1528,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
break; break;
} }
if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
ieee80211_is_mgmt(hdr->frame_control))
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
/* /*
* If we are freeing multiple frames, mark all the frames * If we are freeing multiple frames, mark all the frames
* but the first one as acked, since they were acknowledged * but the first one as acked, since they were acknowledged
...@@ -1600,6 +1631,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, ...@@ -1600,6 +1631,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_mvm_tx_airtime(mvm, mvmsta, iwl_mvm_tx_airtime(mvm, mvmsta,
le16_to_cpu(tx_resp->wireless_media_time)); le16_to_cpu(tx_resp->wireless_media_time));
if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);
if (sta->wme && tid != IWL_MGMT_TID) { if (sta->wme && tid != IWL_MGMT_TID) {
struct iwl_mvm_tid_data *tid_data = struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid]; &mvmsta->tid_data[tid];
......
...@@ -285,6 +285,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) ...@@ -285,6 +285,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx; return last_idx;
} }
#define FW_SYSASSERT_CPU_MASK 0xf0000000
static const struct { static const struct {
const char *name; const char *name;
u8 num; u8 num;
...@@ -301,6 +302,9 @@ static const struct { ...@@ -301,6 +302,9 @@ static const struct {
{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
{ "NMI_INTERRUPT_HOST", 0x66 }, { "NMI_INTERRUPT_HOST", 0x66 },
{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
{ "NMI_INTERRUPT_ACTION_PT", 0x7C }, { "NMI_INTERRUPT_ACTION_PT", 0x7C },
{ "NMI_INTERRUPT_UNKNOWN", 0x84 }, { "NMI_INTERRUPT_UNKNOWN", 0x84 },
{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
...@@ -312,7 +316,7 @@ static const char *desc_lookup(u32 num) ...@@ -312,7 +316,7 @@ static const char *desc_lookup(u32 num)
int i; int i;
for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++) for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
if (advanced_lookup[i].num == num) if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
return advanced_lookup[i].name; return advanced_lookup[i].name;
/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
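The new FW_SYSASSERT_CPU_MASK strips the asserting-CPU nibble from the reported id before the table lookup; a worked example (the id value itself is made up):

	u32 num = 0x20000066;			/* hypothetical SYSASSERT id */
	u32 key = num & ~FW_SYSASSERT_CPU_MASK;	/* 0x20000066 & 0x0fffffff == 0x66 */
	/* desc_lookup() now returns "NMI_INTERRUPT_HOST" (0x66 in the table) */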
...@@ -618,13 +622,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, ...@@ -618,13 +622,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL; return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
if (WARN(mvm->queue_info[queue].tid_bitmap == 0, if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
"Trying to reconfig unallocated queue %d\n", queue)) { "Trying to reconfig unallocated queue %d\n", queue))
spin_unlock_bh(&mvm->queue_info_lock);
return -ENXIO; return -ENXIO;
}
spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
...@@ -768,6 +768,29 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) ...@@ -768,6 +768,29 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
return result; return result;
} }
void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
bool low_latency, u16 mac_id)
{
struct iwl_mac_low_latency_cmd cmd = {
.mac_id = cpu_to_le32(mac_id)
};
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
return;
if (low_latency) {
/* currently we don't care about the direction */
cmd.low_latency_rx = 1;
cmd.low_latency_tx = 1;
}
if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
MAC_CONF_GROUP, 0),
0, sizeof(cmd), &cmd))
IWL_ERR(mvm, "Failed to send low latency command\n");
}
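Factoring the command out of iwl_mvm_update_low_latency() lets other paths send it directly; a hedged usage sketch (the calling context is hypothetical):

	/* e.g. force low latency on for one MAC without touching quotas */
	iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id);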
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool low_latency, bool low_latency,
enum iwl_mvm_low_latency_cause cause) enum iwl_mvm_low_latency_cause cause)
...@@ -786,24 +809,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ...@@ -786,24 +809,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (low_latency == prev) if (low_latency == prev)
return 0; return 0;
if (fw_has_capa(&mvm->fw->ucode_capa, iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);
IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
struct iwl_mac_low_latency_cmd cmd = {
.mac_id = cpu_to_le32(mvmvif->id)
};
if (low_latency) {
/* currently we don't care about the direction */
cmd.low_latency_rx = 1;
cmd.low_latency_tx = 1;
}
res = iwl_mvm_send_cmd_pdu(mvm,
iwl_cmd_id(LOW_LATENCY_CMD,
MAC_CONF_GROUP, 0),
0, sizeof(cmd), &cmd);
if (res)
IWL_ERR(mvm, "Failed to send low latency command\n");
}
res = iwl_mvm_update_quotas(mvm, false, NULL); res = iwl_mvm_update_quotas(mvm, false, NULL);
if (res) if (res)
......
...@@ -513,6 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = { ...@@ -513,6 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */ /* 9000 Series */
{IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
......
...@@ -1729,6 +1729,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, ...@@ -1729,6 +1729,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 hpm;
int err; int err;
lockdep_assert_held(&trans_pcie->mutex); lockdep_assert_held(&trans_pcie->mutex);
...@@ -1739,6 +1740,17 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) ...@@ -1739,6 +1740,17 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
return err; return err;
} }
hpm = iwl_trans_read_prph(trans, HPM_DEBUG);
if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
if (iwl_trans_read_prph(trans, PREG_PRPH_WPROT_0) &
PREG_WFPM_ACCESS) {
IWL_ERR(trans,
"Error, can not clear persistence bit\n");
return -EPERM;
}
iwl_trans_write_prph(trans, HPM_DEBUG, hpm & ~PERSISTENCE_BIT);
}
iwl_trans_pcie_sw_reset(trans); iwl_trans_pcie_sw_reset(trans);
err = iwl_pcie_apm_init(trans); err = iwl_pcie_apm_init(trans);
...@@ -2978,7 +2990,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len) ...@@ -2978,7 +2990,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len)
static struct iwl_trans_dump_data static struct iwl_trans_dump_data
*iwl_trans_pcie_dump_data(struct iwl_trans *trans, *iwl_trans_pcie_dump_data(struct iwl_trans *trans,
const struct iwl_fw_dbg_trigger_tlv *trigger) bool monitor_only)
{ {
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data; struct iwl_fw_error_dump_data *data;
...@@ -3002,7 +3014,7 @@ static struct iwl_trans_dump_data ...@@ -3002,7 +3014,7 @@ static struct iwl_trans_dump_data
/* FW monitor */ /* FW monitor */
monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { if (monitor_only) {
if (!(trans->dbg_dump_mask & if (!(trans->dbg_dump_mask &
BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))) BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
return NULL; return NULL;
......
...@@ -1228,8 +1228,7 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, ...@@ -1228,8 +1228,7 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
/* Place first TFD at index corresponding to start sequence number */ /* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr; txq->read_ptr = wr_ptr;
txq->write_ptr = wr_ptr; txq->write_ptr = wr_ptr;
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(txq->write_ptr) | (qid << 16));
IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
iwl_free_resp(hcmd); iwl_free_resp(hcmd);
......