Commit 520f03ea authored by Shahar S Matityahu, committed by Luca Coelho

iwlwifi: allow masking out memory areas from the fw dump

Reading and dumping memory areas takes time, and sometimes
dumping all of the areas isn't necessary.

Allow choosing which memory areas should be dumped.

Signed-off-by: Shahar S Matityahu <shahar.s.matityahu@intel.com>
Signed-off-by: Golan Ben Ami <golan.ben.ami@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent 92536c96
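
Editor's note: as an illustration of the mechanism described above (a sketch only, not part of the patch), the firmware file may carry the new IWL_UCODE_TLV_FW_DBG_DUMP_LST TLV, whose 32-bit little-endian payload is a bitmask of IWL_FW_ERROR_DUMP_* section types; if the TLV is absent, the driver defaults the mask to 0xffffffff and dumps everything. Each dump site then tests its bit before reserving space and copying data. The section IDs, sizes, and helper names in the standalone sketch below are invented for the example; only the mask-and-BIT() gating mirrors the patch.

/* Editor's sketch -- standalone example, NOT driver code.  Section IDs and
 * sizes are made up; only the mask-and-BIT() gating mirrors the patch. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* hypothetical stand-ins for a few IWL_FW_ERROR_DUMP_* section types */
enum dump_section { DUMP_CSR = 0, DUMP_TXCMD = 1, DUMP_FH_REGS = 2 };

/* compute how much dump space is needed, honouring the mask */
static size_t dump_len(uint32_t dump_mask)
{
	size_t len = 0;

	if (dump_mask & BIT(DUMP_CSR))
		len += 512;	/* pretend size of the CSR block */
	if (dump_mask & BIT(DUMP_TXCMD))
		len += 4096;	/* pretend size of the TX command queue */
	if (dump_mask & BIT(DUMP_FH_REGS))
		len += 1024;	/* pretend size of the FH register block */
	return len;
}

int main(void)
{
	uint32_t mask = 0xffffffff;	/* default: dump everything */

	printf("full dump: %zu bytes\n", dump_len(mask));

	mask &= ~BIT(DUMP_TXCMD);	/* firmware masked out the TX commands */
	printf("reduced dump: %zu bytes\n", dump_len(mask));
	return 0;
}

In the patch itself the same pattern appears as checks like trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR) in the PCIe transport dump code below.
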
@@ -146,6 +146,9 @@ enum iwl_ucode_tlv_type {
 	IWL_UCODE_TLV_FW_GSCAN_CAPA	= 50,
 	IWL_UCODE_TLV_FW_MEM_SEG	= 51,
 	IWL_UCODE_TLV_IML		= 52,
+
+	/* TLVs 0x1000-0x2000 are for internal driver usage */
+	IWL_UCODE_TLV_FW_DBG_DUMP_LST	= 0x1000,
 };
 
 struct iwl_ucode_tlv {
@@ -299,6 +299,7 @@ struct iwl_fw {
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
 	u8 dbg_dest_reg_num;
 	struct iwl_gscan_capabilities gscan_capa;
+	u32 dbg_dump_mask;
 };
 
 static inline const char *get_fw_dbg_mode_string(int mode)
@@ -1043,6 +1043,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 			pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
 			break;
 			}
+		case IWL_UCODE_TLV_FW_DBG_DUMP_LST: {
+			if (tlv_len != sizeof(u32)) {
+				IWL_ERR(drv,
+					"dbg lst mask size incorrect, skip\n");
+				break;
+			}
+
+			drv->fw.dbg_dump_mask =
+				le32_to_cpup((__le32 *)tlv_data);
+			break;
+			}
 		case IWL_UCODE_TLV_SEC_RT_USNIFFER:
 			*usniffer_images = true;
 			iwl_store_ucode_sec(pieces, tlv_data,
@@ -1316,6 +1327,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 	fw->ucode_capa.standard_phy_calibration_size =
 			IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
 	fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
+	/* dump all fw memory areas by default */
+	fw->dbg_dump_mask = 0xffffffff;
 
 	pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
 	if (!pieces)
@@ -771,6 +771,7 @@ struct iwl_trans {
 	const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
 	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
+	u32 dbg_dump_mask;
 	u8 dbg_dest_reg_num;
 
 	enum iwl_plat_pm_mode system_pm_mode;
@@ -746,6 +746,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
 	       sizeof(trans->dbg_conf_tlv));
 	trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
+	trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
 
 	trans->iml = mvm->fw->iml;
 	trans->iml_len = mvm->fw->iml_len;
@@ -84,6 +84,7 @@
 #include "iwl-scd.h"
 #include "iwl-agn-hw.h"
 #include "fw/error-dump.h"
+#include "fw/dbg.h"
 #include "internal.h"
 #include "iwl-fh.h"
@@ -2978,7 +2979,8 @@ static struct iwl_trans_dump_data
 	u32 monitor_len;
 	int i, ptr;
 	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
-			!trans->cfg->mq_rx_supported;
+			!trans->cfg->mq_rx_supported &&
+			trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
 
 	/* transport dump header */
 	len = sizeof(*dump_data);
@@ -3030,6 +3032,10 @@ static struct iwl_trans_dump_data
 	}
 
 	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+		if (!(trans->dbg_dump_mask &
+		      BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
+			return NULL;
+
 		dump_data = vzalloc(len);
 		if (!dump_data)
 			return NULL;
@@ -3042,15 +3048,20 @@ static struct iwl_trans_dump_data
 	}
 
 	/* CSR registers */
-	len += sizeof(*data) + IWL_CSR_TO_DUMP;
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+		len += sizeof(*data) + IWL_CSR_TO_DUMP;
 
 	/* FH registers */
-	if (trans->cfg->gen2)
-		len += sizeof(*data) +
-		       (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2);
-	else
-		len += sizeof(*data) +
-		       (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
+		if (trans->cfg->gen2)
+			len += sizeof(*data) +
+			       (FH_MEM_UPPER_BOUND_GEN2 -
+				FH_MEM_LOWER_BOUND_GEN2);
+		else
+			len += sizeof(*data) +
+			       (FH_MEM_UPPER_BOUND -
+				FH_MEM_LOWER_BOUND);
+	}
 
 	if (dump_rbs) {
 		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
@@ -3066,7 +3077,8 @@ static struct iwl_trans_dump_data
 	}
 
 	/* Paged memory for gen2 HW */
-	if (trans->cfg->gen2)
+	if (trans->cfg->gen2 &&
+	    trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
 		for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
 			len += sizeof(*data) +
 			       sizeof(struct iwl_fw_error_dump_paging) +
@@ -3078,41 +3090,51 @@ static struct iwl_trans_dump_data
 	len = 0;
 	data = (void *)dump_data->data;
 
-	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
-	txcmd = (void *)data->data;
-	spin_lock_bh(&cmdq->lock);
-	ptr = cmdq->write_ptr;
-	for (i = 0; i < cmdq->n_window; i++) {
-		u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
-		u32 caplen, cmdlen;
-
-		cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
-						   trans_pcie->tfd_size * ptr);
-		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
-
-		if (cmdlen) {
-			len += sizeof(*txcmd) + caplen;
-			txcmd->cmdlen = cpu_to_le32(cmdlen);
-			txcmd->caplen = cpu_to_le32(caplen);
-			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
-			txcmd = (void *)((u8 *)txcmd->data + caplen);
-		}
-
-		ptr = iwl_queue_dec_wrap(trans, ptr);
-	}
-	spin_unlock_bh(&cmdq->lock);
-
-	data->len = cpu_to_le32(len);
-	len += sizeof(*data);
-	data = iwl_fw_error_next_data(data);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
+		u16 tfd_size = trans_pcie->tfd_size;
+
+		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+		txcmd = (void *)data->data;
+		spin_lock_bh(&cmdq->lock);
+		ptr = cmdq->write_ptr;
+		for (i = 0; i < cmdq->n_window; i++) {
+			u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+			u32 caplen, cmdlen;
+
+			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
+							   cmdq->tfds +
+							   tfd_size * ptr);
+			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+			if (cmdlen) {
+				len += sizeof(*txcmd) + caplen;
+				txcmd->cmdlen = cpu_to_le32(cmdlen);
+				txcmd->caplen = cpu_to_le32(caplen);
+				memcpy(txcmd->data, cmdq->entries[idx].cmd,
+				       caplen);
+				txcmd = (void *)((u8 *)txcmd->data + caplen);
+			}
+
+			ptr = iwl_queue_dec_wrap(trans, ptr);
+		}
+		spin_unlock_bh(&cmdq->lock);
+
+		data->len = cpu_to_le32(len);
+		len += sizeof(*data);
+		data = iwl_fw_error_next_data(data);
+	}
 
-	len += iwl_trans_pcie_dump_csr(trans, &data);
-	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+		len += iwl_trans_pcie_dump_csr(trans, &data);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
+		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
 	if (dump_rbs)
 		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
 
 	/* Paged memory for gen2 HW */
-	if (trans->cfg->gen2) {
+	if (trans->cfg->gen2 &&
+	    trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
 		for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
 			struct iwl_fw_error_dump_paging *paging;
 			dma_addr_t addr =
@@ -3132,8 +3154,8 @@ static struct iwl_trans_dump_data
 			len += sizeof(*data) + sizeof(*paging) + page_len;
 		}
 	}
-
-	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
 
 	dump_data->len = len;