Commit 2a455012 authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2018-10-06' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Third set of iwlwifi patches for 4.20

* Fix for a race condition that caused the FW to crash;
* HE radiotap cleanup and improvements;
* Reorder channel optimization for scans;
* Bumped the FW API version supported after the last API change for
  this release;
* Debugging improvements;
* A few bug fixes;
* Some cleanups in preparation for a new implementation;
* Other small improvements, cleanups and fixes.
parents e1c02eb1 ea7cb829
@@ -56,7 +56,7 @@
#include "iwl-config.h"

/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX	38
+#define IWL_22000_UCODE_API_MAX	41

/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN	39
......
@@ -57,7 +57,7 @@
#include "fw/file.h"

/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX	38
+#define IWL9000_UCODE_API_MAX	41

/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN	30
......
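Note: bumping IWL_*_UCODE_API_MAX to 41 only changes the newest image the driver asks for; iwl_request_firmware() still falls back toward the API_MIN value when newer files are missing. A rough, hedged sketch of that fallback order (the file-name prefix is elided on purpose and this is not the driver's actual asynchronous request path, which retries via request_firmware_nowait()):

```c
#include <stdio.h>

/*
 * Hedged sketch only: print the firmware file names the driver would try,
 * newest API first, after the bump to 41.  The prefix is illustrative.
 */
int main(void)
{
	const int api_max = 41, api_min = 30;	/* IWL9000 values from this patch */

	for (int api = api_max; api >= api_min; api--)
		printf("iwlwifi-9000-...-%d.ucode\n", api);

	return 0;
}
```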
@@ -165,7 +165,7 @@ struct iwl_nvm_access_resp {
 */
struct iwl_nvm_get_info {
	__le32 reserved;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */

/**
 * enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp

@@ -180,14 +180,14 @@ enum iwl_nvm_info_general_flags {
 * @flags: bit 0: 1 - empty, 0 - non-empty
 * @nvm_version: nvm version
 * @board_type: board type
- * @reserved: reserved
+ * @n_hw_addrs: number of reserved MAC addresses
 */
struct iwl_nvm_get_info_general {
	__le32 flags;
	__le16 nvm_version;
	u8 board_type;
-	u8 reserved;
+	u8 n_hw_addrs;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */

/**
 * enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku

@@ -231,7 +231,7 @@ struct iwl_nvm_get_info_sku {
struct iwl_nvm_get_info_phy {
	__le32 tx_chains;
	__le32 rx_chains;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */

#define IWL_NUM_CHANNELS (51)

@@ -245,7 +245,7 @@ struct iwl_nvm_get_info_regulatory {
	__le32 lar_enabled;
	__le16 channel_profile[IWL_NUM_CHANNELS];
	__le16 reserved;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */

/**
 * struct iwl_nvm_get_info_rsp - response to get NVM data

@@ -259,7 +259,7 @@ struct iwl_nvm_get_info_rsp {
	struct iwl_nvm_get_info_sku mac_sku;
	struct iwl_nvm_get_info_phy phy_sku;
	struct iwl_nvm_get_info_regulatory regulatory;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */

/**
 * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed

@@ -269,22 +269,6 @@ struct iwl_nvm_access_complete_cmd {
	__le32 reserved;
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */

-/**
- * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic
- * regulatory profile according to the given MCC (Mobile Country Code).
- * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
- * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
- * MCC in the cmd response will be the relevant MCC in the NVM.
- * @mcc: given mobile country code
- * @source_id: the source from where we got the MCC, see iwl_mcc_source
- * @reserved: reserved for alignment
- */
-struct iwl_mcc_update_cmd_v1 {
-	__le16 mcc;
-	u8 source_id;
-	u8 reserved;
-} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */
-
/**
 * struct iwl_mcc_update_cmd - Request the device to update geographic
 * regulatory profile according to the given MCC (Mobile Country Code).

@@ -306,7 +290,18 @@ struct iwl_mcc_update_cmd {
} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */

/**
- * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD.
+ * enum iwl_geo_information - geographic information.
+ * @GEO_NO_INFO: no special info for this geo profile.
+ * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params
+ *	for the 5 GHz band.
+ */
+enum iwl_geo_information {
+	GEO_NO_INFO			= 0,
+	GEO_WMM_ETSI_5GHZ_INFO		= BIT(0),
+};
+
+/**
+ * struct iwl_mcc_update_resp_v3 - response to MCC_UPDATE_CMD.
 * Contains the new channel control profile map, if changed, and the new MCC
 * (mobile country code).
 * The new MCC may be different than what was requested in MCC_UPDATE_CMD.

@@ -314,30 +309,23 @@ struct iwl_mcc_update_cmd {
 * @mcc: the new applied MCC
 * @cap: capabilities for all channels which matches the MCC
 * @source_id: the MCC source, see iwl_mcc_source
- * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
- *	channels, depending on platform)
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
+ * @geo_info: geographic specific profile information
+ *	see &enum iwl_geo_information.
+ * @n_channels: number of channels in @channels_data.
 * @channels: channel control data map, DWORD for each channel. Only the first
 *	16bits are used.
 */
-struct iwl_mcc_update_resp_v1 {
+struct iwl_mcc_update_resp_v3 {
	__le32 status;
	__le16 mcc;
	u8 cap;
	u8 source_id;
+	__le16 time;
+	__le16 geo_info;
	__le32 n_channels;
	__le32 channels[0];
-} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */

-/**
- * enum iwl_geo_information - geographic information.
- * @GEO_NO_INFO: no special info for this geo profile.
- * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params
- *	for the 5 GHz band.
- */
-enum iwl_geo_information {
-	GEO_NO_INFO = 0,
-	GEO_WMM_ETSI_5GHZ_INFO = BIT(0),
-};

/**
 * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD.

@@ -347,25 +335,26 @@ enum iwl_geo_information {
 * @status: see &enum iwl_mcc_update_status
 * @mcc: the new applied MCC
 * @cap: capabilities for all channels which matches the MCC
- * @source_id: the MCC source, see iwl_mcc_source
- * @time: time elapsed from the MCC test start (in 30 seconds TU)
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
 * @geo_info: geographic specific profile information
 *	see &enum iwl_geo_information.
- * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
- *	channels, depending on platform)
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @reserved: for four bytes alignment.
+ * @n_channels: number of channels in @channels_data.
 * @channels: channel control data map, DWORD for each channel. Only the first
 *	16bits are used.
 */
struct iwl_mcc_update_resp {
	__le32 status;
	__le16 mcc;
-	u8 cap;
-	u8 source_id;
+	__le16 cap;
	__le16 time;
	__le16 geo_info;
+	u8 source_id;
+	u8 reserved[3];
	__le32 n_channels;
	__le32 channels[0];
-} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */

/**
 * struct iwl_mcc_chub_notif - chub notifies of mcc change
......
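With the response bumped to v4 (and the v3 layout kept for the capability check further down), a consumer can read @geo_info and act on &enum iwl_geo_information. A minimal hedged sketch, assuming the nvm-reg API header above is on the include path; the helper name is illustrative and this is not the mvm regulatory code:

```c
#include "fw/api/nvm-reg.h"	/* assumed include path for the structs above */

/*
 * Hedged sketch: report whether the geo profile returned by MCC_UPDATE_CMD
 * limits the WMM parameters for the 5 GHz band (GEO_WMM_ETSI_5GHZ_INFO).
 */
static bool iwl_mcc_resp_limits_5ghz_wmm(const struct iwl_mcc_update_resp *resp)
{
	return le16_to_cpu(resp->geo_info) & GEO_WMM_ETSI_5GHZ_INFO;
}
```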
@@ -368,10 +368,10 @@ enum iwl_rx_he_phy {
	/* trigger encoded */
	IWL_RX_HE_PHY_RU_ALLOC_MASK		= 0xfe0000000000ULL,
	IWL_RX_HE_PHY_INFO_TYPE_MASK		= 0xf000000000000000ULL,
-	IWL_RX_HE_PHY_INFO_TYPE_SU		= 0x0,
-	IWL_RX_HE_PHY_INFO_TYPE_MU		= 0x1,
-	IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO	= 0x2,
-	IWL_RX_HE_PHY_INFO_TYPE_TB_EXT_INFO	= 0x3,
+	IWL_RX_HE_PHY_INFO_TYPE_SU		= 0x0, /* TSF low valid (first DW) */
+	IWL_RX_HE_PHY_INFO_TYPE_MU		= 0x1, /* TSF low/high valid (both DWs) */
+	IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO	= 0x2, /* same + SIGB-common0/1/2 valid */
+	IWL_RX_HE_PHY_INFO_TYPE_TB		= 0x3, /* TSF low/high valid (both DWs) */

	/* second dword - MU data */
	IWL_RX_HE_PHY_MU_SIGB_COMPRESSION	= BIT_ULL(32 + 0),
......
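The renamed TB type and the new per-type comments spell out which TSF DWORDs carry meaningful data for the HE radiotap cleanup. A hedged sketch of pulling the info type out of the 64-bit PHY data (FIELD_GET is the generic kernel bitfield helper; the function itself is illustrative, not the actual rx/radiotap code):

```c
#include <linux/bitfield.h>

/*
 * Hedged sketch: extract the HE PHY info type and report whether both TSF
 * DWORDs are valid (MU/MU_EXT_INFO/TB) or only the first one (SU), per the
 * comments added above.
 */
static bool iwl_he_phy_both_tsf_dws_valid(u64 he_phy_data)
{
	u64 info_type = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data);

	return info_type != IWL_RX_HE_PHY_INFO_TYPE_SU;
}
```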
@@ -596,9 +596,12 @@ enum iwl_umac_scan_general_flags {
 * enum iwl_umac_scan_general_flags2 - UMAC scan general flags #2
 * @IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete
 *	notification per channel or not.
+ * @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
+ *	reorder optimization or not.
 */
enum iwl_umac_scan_general_flags2 {
	IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL		= BIT(0),
+	IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER	= BIT(1),
};

/**
......
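The channel-reorder scan optimization from this series is opt-in through the new bit. A hedged sketch of composing the flags2 value when filling a UMAC scan request (the policy condition and the caller are illustrative; the real decision lives in the mvm scan code):

```c
/*
 * Hedged sketch: compose the general_flags2 byte of a UMAC scan request.
 * Which scans actually let the firmware reorder channels is driver policy;
 * this only shows how the new bit combines with the existing one.
 */
static u8 iwl_scan_gen_flags2(bool notif_per_chnl, bool allow_reorder)
{
	u8 flags2 = 0;

	if (notif_per_chnl)
		flags2 |= IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL;
	if (allow_reorder)
		flags2 |= IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;

	return flags2;
}
```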
@@ -107,25 +107,25 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
			    const struct iwl_fw_dump_desc *desc,
-			    const struct iwl_fw_dbg_trigger_tlv *trigger);
+			    void *trigger, unsigned int delay);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
		       enum iwl_fw_dbg_trigger trig,
		       const char *str, size_t len,
-		       const struct iwl_fw_dbg_trigger_tlv *trigger);
+		       struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
			    struct iwl_fw_dbg_trigger_tlv *trigger,
			    const char *fmt, ...) __printf(3, 4);
int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id);

#define iwl_fw_dbg_trigger_enabled(fw, id) ({			\
-	void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)];	\
+	void *__dbg_trigger = (fw)->dbg.trigger_tlv[(id)];	\
	unlikely(__dbg_trigger);				\
})

static inline struct iwl_fw_dbg_trigger_tlv*
_iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id)
{
-	return fw->dbg_trigger_tlv[id];
+	return fw->dbg.trigger_tlv[id];
}

#define iwl_fw_dbg_get_trigger(fw, id) ({ \

@@ -154,12 +154,9 @@ iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt,
}

static inline bool
-iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt,
-			  struct iwl_fw_dbg_trigger_tlv *trig)
+iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_ms)
{
-	unsigned long wind_jiff =
-		msecs_to_jiffies(le16_to_cpu(trig->trig_dis_ms));
-	u32 id = le32_to_cpu(trig->id);
+	unsigned long wind_jiff = msecs_to_jiffies(dis_ms);

	/* If this is the first event checked, jump to update start ts */
	if (fwrt->dump.non_collect_ts_start[id] &&

@@ -179,7 +176,8 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
	if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev))
		return false;

-	if (iwl_fw_dbg_no_trig_window(fwrt, trig)) {
+	if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id),
+				      le16_to_cpu(trig->trig_dis_ms))) {
		IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n",
			 trig->id);
		return false;

@@ -188,6 +186,30 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
	return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig);
}

+static inline struct iwl_fw_dbg_trigger_tlv*
+_iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
+		       struct wireless_dev *wdev,
+		       const enum iwl_fw_dbg_trigger id)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+
+	if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id))
+		return NULL;
+
+	trig = _iwl_fw_dbg_get_trigger(fwrt->fw, id);
+
+	if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trig))
+		return NULL;
+
+	return trig;
+}
+
+#define iwl_fw_dbg_trigger_on(fwrt, wdev, id) ({		\
+	BUILD_BUG_ON(!__builtin_constant_p(id));		\
+	BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX);		\
+	_iwl_fw_dbg_trigger_on((fwrt), (wdev), (id));		\
+})
+
static inline void
_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
				struct wireless_dev *wdev,

@@ -293,7 +315,7 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
	return fw_has_capa(&fwrt->fw->ucode_capa,
			   IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
	       fwrt->trans->cfg->d3_debug_data_length &&
-	       fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+	       fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
}

void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);

@@ -344,4 +366,5 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

+void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_dbg_h__ */
@@ -258,11 +258,75 @@ static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime *fwrt,

FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16);

+struct hcmd_write_data {
+	__be32 cmd_id;
+	__be32 flags;
+	__be16 length;
+	u8 data[0];
+} __packed;
+
+static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
+					 size_t count)
+{
+	size_t header_size = (sizeof(u32) * 2 + sizeof(u16)) * 2;
+	size_t data_size = (count - 1) / 2;
+	int ret;
+	struct hcmd_write_data *data;
+	struct iwl_host_cmd hcmd = {
+		.len = { 0, },
+		.data = { NULL, },
+	};
+
+	if (fwrt->ops && fwrt->ops->fw_running &&
+	    !fwrt->ops->fw_running(fwrt->ops_ctx))
+		return -EIO;
+
+	if (count < header_size + 1 || count > 1024 * 4)
+		return -EINVAL;
+
+	data = kmalloc(data_size, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	ret = hex2bin((u8 *)data, buf, data_size);
+	if (ret)
+		goto out;
+
+	hcmd.id = be32_to_cpu(data->cmd_id);
+	hcmd.flags = be32_to_cpu(data->flags);
+	hcmd.len[0] = be16_to_cpu(data->length);
+	hcmd.data[0] = data->data;
+
+	if (count != header_size + hcmd.len[0] * 2 + 1) {
+		IWL_ERR(fwrt,
+			"host command data size does not match header length\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (fwrt->ops && fwrt->ops->send_hcmd)
+		ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
+	else
+		ret = -EPERM;
+
+	if (ret < 0)
+		goto out;
+
+	if (hcmd.flags & CMD_WANT_SKB)
+		iwl_free_resp(&hcmd);
+out:
+	kfree(data);
+	return ret ?: count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
+
int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
			    struct dentry *dbgfs_dir)
{
	INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
	FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
+	FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
	return 0;
err:
	IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n");
......
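For reference, the new debugfs file takes one hex string per write: big-endian cmd_id and flags, a big-endian payload length, then the payload bytes, exactly as laid out in struct hcmd_write_data above. A hedged userspace sketch that formats such a string (command ID, flags and the debugfs path are placeholders, not a recommended command):

```c
#include <stdio.h>
#include <stdint.h>

/*
 * Hedged sketch: print the hex blob expected by the send_hcmd debugfs file.
 * Layout per struct hcmd_write_data: be32 cmd_id, be32 flags, be16 length,
 * then 'length' payload bytes.  Redirect the output into the fwrt debugfs
 * directory's "send_hcmd" file (exact path depends on platform/device).
 */
static void print_send_hcmd_blob(uint32_t cmd_id, uint32_t flags,
				 const uint8_t *payload, uint16_t len)
{
	printf("%08x%08x%04x", cmd_id, flags, len);
	for (uint16_t i = 0; i < len; i++)
		printf("%02x", payload[i]);
	putchar('\n');
}

int main(void)
{
	const uint8_t payload[4] = { 0 };	/* placeholder payload */

	print_send_hcmd_blob(0x0, 0x0, payload, sizeof(payload));
	return 0;
}
```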
@@ -328,6 +328,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
 * @FW_DBG_TDLS: trigger log collection upon TDLS related events.
 * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
 *	the firmware sends a tx reply.
+ * @FW_DBG_TRIGGER_NO_ALIVE: trigger log collection if alive flow fails
 */
enum iwl_fw_dbg_trigger {
	FW_DBG_TRIGGER_INVALID = 0,

@@ -345,6 +346,7 @@ enum iwl_fw_dbg_trigger {
	FW_DBG_TRIGGER_TX_LATENCY,
	FW_DBG_TRIGGER_TDLS,
	FW_DBG_TRIGGER_TX_STATUS,
+	FW_DBG_TRIGGER_NO_ALIVE,

	/* must be last */
	FW_DBG_TRIGGER_MAX,
......
@@ -337,7 +337,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
 *	antenna the beacon should be transmitted
 * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
 *	from AP and will send it upon d0i3 exit.
- * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
+ * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3: support LAR API V3
 * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
 * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
 *	thresholds reporting

@@ -352,6 +352,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
 *	power reduction.
 * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
 * @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3
+ * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax
+ *	capability.
 *
 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
 */

@@ -392,7 +394,7 @@ enum iwl_ucode_tlv_capa {
	IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD		= (__force iwl_ucode_tlv_capa_t)70,
	IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION		= (__force iwl_ucode_tlv_capa_t)71,
	IWL_UCODE_TLV_CAPA_BEACON_STORING		= (__force iwl_ucode_tlv_capa_t)72,
-	IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2		= (__force iwl_ucode_tlv_capa_t)73,
+	IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3		= (__force iwl_ucode_tlv_capa_t)73,
	IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW		= (__force iwl_ucode_tlv_capa_t)74,
	IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT	= (__force iwl_ucode_tlv_capa_t)75,
	IWL_UCODE_TLV_CAPA_CTDP_SUPPORT			= (__force iwl_ucode_tlv_capa_t)76,

@@ -402,6 +404,7 @@ enum iwl_ucode_tlv_capa {
	IWL_UCODE_TLV_CAPA_TX_POWER_ACK			= (__force iwl_ucode_tlv_capa_t)84,
	IWL_UCODE_TLV_CAPA_D3_DEBUG			= (__force iwl_ucode_tlv_capa_t)87,
	IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT		= (__force iwl_ucode_tlv_capa_t)88,
+	IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT	= (__force iwl_ucode_tlv_capa_t)89,
	IWL_UCODE_TLV_CAPA_MLME_OFFLOAD			= (__force iwl_ucode_tlv_capa_t)96,

	NUM_IWL_UCODE_TLV_CAPA
......
@@ -197,6 +197,29 @@ enum iwl_fw_type {
	IWL_FW_MVM,
};

+/**
+ * struct iwl_fw_dbg - debug data
+ *
+ * @dest_tlv: points to debug destination TLV (typically SRAM or DRAM)
+ * @n_dest_reg: num of reg_ops in dest_tlv
+ * @conf_tlv: array of pointers to configuration HCMDs
+ * @trigger_tlv: array of pointers to triggers TLVs
+ * @trigger_tlv_len: lengths of the @dbg_trigger_tlv entries
+ * @mem_tlv: Runtime addresses to dump
+ * @n_mem_tlv: number of runtime addresses
+ * @dump_mask: bitmask of dump regions
+ */
+struct iwl_fw_dbg {
+	struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
+	u8 n_dest_reg;
+	struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
+	struct iwl_fw_dbg_trigger_tlv *trigger_tlv[FW_DBG_TRIGGER_MAX];
+	size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+	struct iwl_fw_dbg_mem_seg_tlv *mem_tlv;
+	size_t n_mem_tlv;
+	u32 dump_mask;
+};
+
/**
 * struct iwl_fw - variables associated with the firmware
 *

@@ -217,12 +240,6 @@ enum iwl_fw_type {
 * @cipher_scheme: optional external cipher scheme.
 * @human_readable: human readable version
 *	we get the ALIVE from the uCode
- * @dbg_dest_tlv: points to the destination TLV for debug
- * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
- * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
- * @dbg_trigger_tlv: array of pointers to triggers TLVs
- * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries
- * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
 */
struct iwl_fw {
	u32 ucode_ver;

@@ -250,15 +267,7 @@ struct iwl_fw {
	struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
	u8 human_readable[FW_VER_HUMAN_READABLE_SZ];

-	struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
-	struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
-	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
-	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
-	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
-	size_t n_dbg_mem_tlv;
-	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
-	u8 dbg_dest_reg_num;
-	u32 dbg_dump_mask;
+	struct iwl_fw_dbg dbg;
};

static inline const char *get_fw_dbg_mode_string(int mode)

@@ -280,7 +289,7 @@ static inline const char *get_fw_dbg_mode_string(int mode)
static inline bool
iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
{
-	const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
+	const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg.conf_tlv[id];

	if (!conf_tlv)
		return false;
......
@@ -71,6 +71,7 @@ struct iwl_fw_runtime_ops {
	int (*dump_start)(void *ctx);
	void (*dump_end)(void *ctx);
	bool (*fw_running)(void *ctx);
+	int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
};

#define MAX_NUM_LMAC 2

@@ -88,6 +89,7 @@ struct iwl_fwrt_shared_mem_cfg {
enum iwl_fw_runtime_status {
	IWL_FWRT_STATUS_DUMPING = 0,
+	IWL_FWRT_STATUS_WAIT_ALIVE,
};

/**
......
@@ -168,12 +168,12 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
{
	int i;

-	kfree(drv->fw.dbg_dest_tlv);
-	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
-		kfree(drv->fw.dbg_conf_tlv[i]);
-	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
-		kfree(drv->fw.dbg_trigger_tlv[i]);
-	kfree(drv->fw.dbg_mem_tlv);
+	kfree(drv->fw.dbg.dest_tlv);
+	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++)
+		kfree(drv->fw.dbg.conf_tlv[i]);
+	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++)
+		kfree(drv->fw.dbg.trigger_tlv[i]);
+	kfree(drv->fw.dbg.mem_tlv);
	kfree(drv->fw.iml);

	for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)

@@ -303,7 +303,7 @@ struct iwl_firmware_pieces {
	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
-	size_t n_dbg_mem_tlv;
+	size_t n_mem_tlv;
};

/*

@@ -936,7 +936,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
			IWL_INFO(drv, "Found debug destination: %s\n",
				 get_fw_dbg_mode_string(mon_mode));

-			drv->fw.dbg_dest_reg_num = (dest_v1) ?
+			drv->fw.dbg.n_dest_reg = (dest_v1) ?
				tlv_len -
				offsetof(struct iwl_fw_dbg_dest_tlv_v1,
					 reg_ops) :

@@ -944,8 +944,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
				offsetof(struct iwl_fw_dbg_dest_tlv,
					 reg_ops);

-			drv->fw.dbg_dest_reg_num /=
-				sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
+			drv->fw.dbg.n_dest_reg /=
+				sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]);

			break;
			}

@@ -959,7 +959,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
				break;
			}

-			if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) {
+			if (conf->id >= ARRAY_SIZE(drv->fw.dbg.conf_tlv)) {
				IWL_ERR(drv,
					"Skip unknown configuration: %d\n",
					conf->id);

@@ -988,7 +988,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
				(void *)tlv_data;
			u32 trigger_id = le32_to_cpu(trigger->id);

-			if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) {
+			if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
				IWL_ERR(drv,
					"Skip unknown trigger: %u\n",
					trigger->id);

@@ -1015,7 +1015,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
				break;
			}

-			drv->fw.dbg_dump_mask =
+			drv->fw.dbg.dump_mask =
				le32_to_cpup((__le32 *)tlv_data);
			break;
			}

@@ -1070,13 +1070,13 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
				 dbg_mem->data_type);

			size = sizeof(*pieces->dbg_mem_tlv) *
-			       (pieces->n_dbg_mem_tlv + 1);
+			       (pieces->n_mem_tlv + 1);
			n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL);
			if (!n)
				return -ENOMEM;
			pieces->dbg_mem_tlv = n;
-			pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem;
-			pieces->n_dbg_mem_tlv++;
+			pieces->dbg_mem_tlv[pieces->n_mem_tlv] = *dbg_mem;
+			pieces->n_mem_tlv++;
			break;
			}
		case IWL_UCODE_TLV_IML: {

@@ -1256,7 +1256,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
		IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
	fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
	/* dump all fw memory areas by default except d3 debug data */
-	fw->dbg_dump_mask = 0xfffdffff;
+	fw->dbg.dump_mask = 0xfffdffff;

	pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
	if (!pieces)

@@ -1323,21 +1323,21 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
		goto out_free_fw;

	if (pieces->dbg_dest_tlv_init) {
-		size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) +
-			sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
-			drv->fw.dbg_dest_reg_num;
+		size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) +
+			sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
+			drv->fw.dbg.n_dest_reg;

-		drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
+		drv->fw.dbg.dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);

-		if (!drv->fw.dbg_dest_tlv)
+		if (!drv->fw.dbg.dest_tlv)
			goto out_free_fw;

		if (*pieces->dbg_dest_ver == 0) {
-			memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1,
+			memcpy(drv->fw.dbg.dest_tlv, pieces->dbg_dest_tlv_v1,
			       dbg_dest_size);
		} else {
			struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv =
-				drv->fw.dbg_dest_tlv;
+				drv->fw.dbg.dest_tlv;

			dest_tlv->version = pieces->dbg_dest_tlv->version;
			dest_tlv->monitor_mode =

@@ -1352,8 +1352,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
				pieces->dbg_dest_tlv->base_shift;
			memcpy(dest_tlv->reg_ops,
			       pieces->dbg_dest_tlv->reg_ops,
-			       sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
-			       drv->fw.dbg_dest_reg_num);
+			       sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
+			       drv->fw.dbg.n_dest_reg);

			/* In version 1 of the destination tlv, which is
			 * relevant for internal buffer exclusively,

@@ -1369,15 +1369,13 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
		}
	}

-	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
+	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) {
		if (pieces->dbg_conf_tlv[i]) {
-			drv->fw.dbg_conf_tlv_len[i] =
-				pieces->dbg_conf_tlv_len[i];
-			drv->fw.dbg_conf_tlv[i] =
+			drv->fw.dbg.conf_tlv[i] =
				kmemdup(pieces->dbg_conf_tlv[i],
-					drv->fw.dbg_conf_tlv_len[i],
+					pieces->dbg_conf_tlv_len[i],
					GFP_KERNEL);
-			if (!drv->fw.dbg_conf_tlv[i])
+			if (!pieces->dbg_conf_tlv_len[i])
				goto out_free_fw;
		}
	}

@@ -1404,7 +1402,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
	trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] =
		sizeof(struct iwl_fw_dbg_trigger_tdls);

-	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
+	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) {
		if (pieces->dbg_trigger_tlv[i]) {
			/*
			 * If the trigger isn't long enough, WARN and exit.

@@ -1417,22 +1415,22 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
				 (trigger_tlv_sz[i] +
				  sizeof(struct iwl_fw_dbg_trigger_tlv))))
				goto out_free_fw;
-			drv->fw.dbg_trigger_tlv_len[i] =
+			drv->fw.dbg.trigger_tlv_len[i] =
				pieces->dbg_trigger_tlv_len[i];
-			drv->fw.dbg_trigger_tlv[i] =
+			drv->fw.dbg.trigger_tlv[i] =
				kmemdup(pieces->dbg_trigger_tlv[i],
-					drv->fw.dbg_trigger_tlv_len[i],
+					drv->fw.dbg.trigger_tlv_len[i],
					GFP_KERNEL);
-			if (!drv->fw.dbg_trigger_tlv[i])
+			if (!drv->fw.dbg.trigger_tlv[i])
				goto out_free_fw;
		}
	}

	/* Now that we can no longer fail, copy information */

-	drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv;
+	drv->fw.dbg.mem_tlv = pieces->dbg_mem_tlv;
	pieces->dbg_mem_tlv = NULL;
-	drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv;
+	drv->fw.dbg.n_mem_tlv = pieces->n_mem_tlv;

	/*
	 * The (size - 16) / 12 formula is based on the information recorded

@@ -1473,6 +1471,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
		break;
	default:
		WARN(1, "Invalid fw type %d\n", fw->type);
+		/* fall through */
	case IWL_FW_MVM:
		op = &iwlwifi_opmode_table[MVM_OP_MODE];
		break;
......
@@ -1335,6 +1335,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
	bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
				fw_has_capa(&fw->ucode_capa,
					    IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+	bool empty_otp;
	u32 mac_flags;
	u32 sbands_flags = 0;

@@ -1350,7 +1351,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
	}

	rsp = (void *)hcmd.resp_pkt->data;
-	if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
+	empty_otp = !!(le32_to_cpu(rsp->general.flags) &
+		       NVM_GENERAL_FLAGS_EMPTY_OTP);
+	if (empty_otp)
		IWL_INFO(trans, "OTP is empty\n");

	nvm = kzalloc(sizeof(*nvm) +

@@ -1374,6 +1377,11 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,

	/* Initialize general data */
	nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
+	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
+	if (nvm->n_hw_addrs == 0)
+		IWL_WARN(trans,
+			 "Firmware declares no reserved mac addresses. OTP is empty: %d\n",
+			 empty_otp);

	/* Initialize MAC sku data */
	mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
......
@@ -725,7 +725,7 @@ struct iwl_dram_data {
 * @dbg_dest_tlv: points to the destination TLV for debug
 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
 * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
- * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @dbg_n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @num_blocks: number of blocks in fw_mon
 * @fw_mon: address of the buffers for firmware monitor
 * @system_pm_mode: the system-wide power management mode in use.

@@ -778,7 +778,7 @@ struct iwl_trans {
	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
	u32 dbg_dump_mask;
-	u8 dbg_dest_reg_num;
+	u8 dbg_n_dest_reg;
	int num_blocks;
	struct iwl_dram_data fw_mon[IWL_MAX_DEBUG_ALLOCATIONS];
......
@@ -666,16 +666,11 @@ static ssize_t iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
	};
	int ret, bt_force_ant_mode;

-	for (bt_force_ant_mode = 0;
-	     bt_force_ant_mode < ARRAY_SIZE(modes_str);
-	     bt_force_ant_mode++) {
-		if (!strcmp(buf, modes_str[bt_force_ant_mode]))
-			break;
-	}
-
-	if (bt_force_ant_mode >= ARRAY_SIZE(modes_str))
-		return -EINVAL;
+	ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf);
+	if (ret < 0)
+		return ret;

+	bt_force_ant_mode = ret;
	ret = 0;
	mutex_lock(&mvm->mutex);
	if (mvm->bt_force_ant_mode == bt_force_ant_mode)
......
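The kernel's match_string() helper returns the index of the matching array entry or -EINVAL, which is what lets the open-coded strcmp loop and the manual -EINVAL collapse into three lines. A hedged stand-alone sketch of the same idiom (the table below is illustrative, not the driver's modes_str):

```c
#include <linux/kernel.h>
#include <linux/string.h>

/*
 * Hedged sketch of the match_string() idiom used above: map a user-supplied
 * mode name to its index in a fixed table, or return a negative errno.
 */
static int parse_mode(const char *buf)
{
	static const char * const modes[] = { "dis", "auto", "bt", "wifi" };

	return match_string(modes, ARRAY_SIZE(modes), buf);
}
```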
@@ -299,6 +299,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
	static const u16 alive_cmd[] = { MVM_ALIVE };

+	set_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,

@@ -369,6 +370,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+	clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);

	return 0;
}

@@ -699,8 +701,12 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
	enabled = !!(wifi_pkg->package.elements[1].integer.value);
	n_profiles = wifi_pkg->package.elements[2].integer.value;

-	/* in case of BIOS bug */
-	if (n_profiles <= 0) {
+	/*
+	 * Check the validity of n_profiles. The EWRD profiles start
+	 * from index 1, so the maximum value allowed here is
+	 * ACPI_SAR_PROFILES_NUM - 1.
+	 */
+	if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
		ret = -EINVAL;
		goto out_free;
	}

@@ -1022,7 +1028,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
	mvm->fwrt.dump.conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
-	if (mvm->fw->dbg_dest_tlv)
+	if (mvm->fw->dbg.dest_tlv)
		mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
	iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
......
@@ -1487,12 +1487,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
	    IWL_MVM_MISSED_BEACONS_THRESHOLD)
		ieee80211_beacon_loss(vif);

-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
-					FW_DBG_TRIGGER_MISSED_BEACONS))
+	trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+					FW_DBG_TRIGGER_MISSED_BEACONS);
+	if (!trigger)
		return;

-	trigger = iwl_fw_dbg_get_trigger(mvm->fw,
-					 FW_DBG_TRIGGER_MISSED_BEACONS);
	bcon_trig = (void *)trigger->data;
	stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
	stop_trig_missed_bcon_since_rx =

@@ -1500,11 +1499,6 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,

	/* TODO: implement start trigger */

-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-					   ieee80211_vif_to_wdev(vif),
-					   trigger))
-		return;
-
	if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
	    rx_missed_bcon >= stop_trig_missed_bcon)
		iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
......
@@ -857,16 +857,13 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+				     FW_DBG_TRIGGER_BA);
+	if (!trig)
		return;

-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;

-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-					   ieee80211_vif_to_wdev(vif), trig))
-		return;
-
	switch (action) {
	case IEEE80211_AMPDU_TX_OPERATIONAL: {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

@@ -1231,12 +1228,15 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
	iwl_mvm_del_aux_sta(mvm);

	/*
-	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
-	 * won't be called in this case).
+	 * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
+	 * hw (as restart_complete() won't be called in this case) and mac80211
+	 * won't execute the restart.
	 * But make sure to cleanup interfaces that have gone down before/during
	 * HW restart was requested.
	 */
-	if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+	if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+	    test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+			       &mvm->status))
		ieee80211_iterate_interfaces(mvm->hw, 0,
					     iwl_mvm_cleanup_iterator, mvm);

@@ -2802,14 +2802,12 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_tdls *tdls_trig;

-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+				     FW_DBG_TRIGGER_TDLS);
+	if (!trig)
		return;

-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
	tdls_trig = (void *)trig->data;

-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-					   ieee80211_vif_to_wdev(vif), trig))
-		return;
-
	if (!(tdls_trig->action_bitmap & BIT(action)))
		return;

@@ -4491,14 +4489,12 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+				     FW_DBG_TRIGGER_MLME);
+	if (!trig)
		return;

-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
	trig_mlme = (void *)trig->data;

-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-					   ieee80211_vif_to_wdev(vif), trig))
-		return;
-
	if (event->u.mlme.data == ASSOC_EVENT) {
		if (event->u.mlme.status == MLME_DENIED)

@@ -4533,14 +4529,12 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+				     FW_DBG_TRIGGER_BA);
+	if (!trig)
		return;

-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;

-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-					   ieee80211_vif_to_wdev(vif), trig))
-		return;
-
	if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
		return;
......
@@ -477,15 +477,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
	u32 status;
	int resp_len, n_channels;
	u16 mcc;
-	bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa,
-				   IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
		return ERR_PTR(-EOPNOTSUPP);

	cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
-	if (!resp_v2)
-		cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1);

	IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
		      alpha2[0], alpha2[1], src_id);

@@ -497,7 +493,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
	pkt = cmd.resp_pkt;

	/* Extract MCC response */
-	if (resp_v2) {
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) {
		struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;

		n_channels = __le32_to_cpu(mcc_resp->n_channels);

@@ -509,9 +506,9 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
			goto exit;
		}
	} else {
-		struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;
+		struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;

-		n_channels = __le32_to_cpu(mcc_resp_v1->n_channels);
+		n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
		resp_len = sizeof(struct iwl_mcc_update_resp) +
			   n_channels * sizeof(__le32);
		resp_cp = kzalloc(resp_len, GFP_KERNEL);

@@ -520,12 +517,14 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
			goto exit;
		}

-		resp_cp->status = mcc_resp_v1->status;
-		resp_cp->mcc = mcc_resp_v1->mcc;
-		resp_cp->cap = mcc_resp_v1->cap;
-		resp_cp->source_id = mcc_resp_v1->source_id;
-		resp_cp->n_channels = mcc_resp_v1->n_channels;
-		memcpy(resp_cp->channels, mcc_resp_v1->channels,
+		resp_cp->status = mcc_resp_v3->status;
+		resp_cp->mcc = mcc_resp_v3->mcc;
+		resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap);
+		resp_cp->source_id = mcc_resp_v3->source_id;
+		resp_cp->time = mcc_resp_v3->time;
+		resp_cp->geo_info = mcc_resp_v3->geo_info;
+		resp_cp->n_channels = mcc_resp_v3->n_channels;
+		memcpy(resp_cp->channels, mcc_resp_v3->channels,
		       n_channels * sizeof(__le32));
	}
......
@@ -565,10 +565,23 @@ static bool iwl_mvm_fwrt_fw_running(void *ctx)
	return iwl_mvm_firmware_running(ctx);
}

+static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_send_cmd(mvm, host_cmd);
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
+
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
	.dump_start = iwl_mvm_fwrt_dump_start,
	.dump_end = iwl_mvm_fwrt_dump_end,
	.fw_running = iwl_mvm_fwrt_fw_running,
+	.send_hcmd = iwl_mvm_fwrt_send_hcmd,
};

static struct iwl_op_mode *

@@ -604,9 +617,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,

	if (cfg->max_rx_agg_size)
		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
+	else
+		hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;

	if (cfg->max_tx_agg_size)
		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
+	else
+		hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;

	op_mode = hw->priv;

@@ -748,12 +765,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
	iwl_trans_configure(mvm->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
-	trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
-	trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
-	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
+	trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
+	trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
+	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
	       sizeof(trans->dbg_conf_tlv));
-	trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
-	trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
+	trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
+	trans->dbg_dump_mask = mvm->fw->dbg.dump_mask;

	trans->iml = mvm->fw->iml;
	trans->iml_len = mvm->fw->iml_len;

@@ -784,6 +801,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
	mutex_lock(&mvm->mutex);
	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
	err = iwl_run_init_mvm_ucode(mvm, true);
+	if (test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status))
+		iwl_fw_alive_error_dump(&mvm->fwrt);
	if (!iwlmvm_mod_params.init_dbg || !err)
		iwl_mvm_stop_device(mvm);
	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);

@@ -953,15 +972,13 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
	int i;

-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+				     FW_DBG_TRIGGER_FW_NOTIF);
+	if (!trig)
		return;

-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
	cmds_trig = (void *)trig->data;

-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-		return;
-
	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
		/* don't collect on CMD 0 */
		if (!cmds_trig->cmds[i].cmd_id)

@@ -1223,7 +1240,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
	 */
	if (!mvm->fw_restart && fw_error) {
		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
-					NULL);
+					NULL, 0);
	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_reprobe *reprobe;
......
@@ -433,13 +433,14 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 		struct ieee80211_vif *tx_blocked_vif =
 			rcu_dereference(mvm->csa_tx_blocked_vif);
+		struct iwl_fw_dbg_trigger_tlv *trig;
+		struct ieee80211_vif *vif = mvmsta->vif;
 
 		/* We have tx blocked stations (with CS bit). If we heard
 		 * frames from a blocked station on a new channel we can
 		 * TX to it again.
 		 */
-		if (unlikely(tx_blocked_vif) &&
-		    mvmsta->vif == tx_blocked_vif) {
+		if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) {
 			struct iwl_mvm_vif *mvmvif =
 				iwl_mvm_vif_from_mac80211(tx_blocked_vif);
@@ -450,23 +451,18 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 		rs_update_last_rssi(mvm, mvmsta, rx_status);
 
-		if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
-		    ieee80211_is_beacon(hdr->frame_control)) {
-			struct iwl_fw_dbg_trigger_tlv *trig;
+		trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+					     ieee80211_vif_to_wdev(vif),
+					     FW_DBG_TRIGGER_RSSI);
+		if (trig && ieee80211_is_beacon(hdr->frame_control)) {
 			struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
-			bool trig_check;
 			s32 rssi;
 
-			trig = iwl_fw_dbg_get_trigger(mvm->fw,
-						      FW_DBG_TRIGGER_RSSI);
 			rssi_trig = (void *)trig->data;
 			rssi = le32_to_cpu(rssi_trig->rssi);
 
-			trig_check =
-				iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-							      ieee80211_vif_to_wdev(mvmsta->vif),
-							      trig);
-			if (trig_check && rx_status->signal < rssi)
+			if (rx_status->signal < rssi)
 				iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 							NULL);
 		}
@@ -693,15 +689,12 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
 	struct iwl_fw_dbg_trigger_stats *trig_stats;
 	u32 trig_offset, trig_thold;
 
-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS);
+	if (!trig)
 		return;
 
-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
 	trig_stats = (void *)trig->data;
 
-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-		return;
-
 	trig_offset = le32_to_cpu(trig_stats->stop_offset);
 	trig_thold = le32_to_cpu(trig_stats->stop_threshold);
......
@@ -1448,6 +1448,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
 			cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
 							IWL_SCAN_NUM_OF_FRAGS;
+
+		cmd->v8.general_flags2 =
+			IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
 	}
 
 	cmd->scan_start_mac_id = scan_vif->id;
......
@@ -254,17 +254,14 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
 	struct iwl_fw_dbg_trigger_time_event *te_trig;
 	int i;
 
-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+				     ieee80211_vif_to_wdev(te_data->vif),
+				     FW_DBG_TRIGGER_TIME_EVENT);
+	if (!trig)
 		return;
 
-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
 	te_trig = (void *)trig->data;
 
-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-					   ieee80211_vif_to_wdev(te_data->vif),
-					   trig))
-		return;
-
 	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
 		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
 		u32 trig_action_bitmap =
......
@@ -79,15 +79,12 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
 	struct iwl_fw_dbg_trigger_tlv *trig;
 	struct iwl_fw_dbg_trigger_ba *ba_trig;
 
-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
+	if (!trig)
 		return;
 
-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
 	ba_trig = (void *)trig->data;
 
-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-		return;
-
 	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
 		return;
@@ -1414,15 +1411,13 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
 	struct iwl_fw_dbg_trigger_tx_status *status_trig;
 	int i;
 
-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+				     FW_DBG_TRIGGER_TX_STATUS);
+	if (!trig)
 		return;
 
-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
 	status_trig = (void *)trig->data;
 
-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
-		return;
-
 	for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
 		/* don't collect on status 0 */
 		if (!status_trig->statuses[i].status)
......
@@ -1238,14 +1238,12 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	struct iwl_fw_dbg_trigger_tlv *trig;
 	struct iwl_fw_dbg_trigger_mlme *trig_mlme;
 
-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+				     FW_DBG_TRIGGER_MLME);
+	if (!trig)
 		goto out;
 
-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
 	trig_mlme = (void *)trig->data;
 
-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-					   ieee80211_vif_to_wdev(vif), trig))
-		goto out;
-
 	if (trig_mlme->stop_connection_loss &&
 	    --trig_mlme->stop_connection_loss)
@@ -1430,14 +1428,12 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
 	struct iwl_fw_dbg_trigger_tlv *trig;
 	struct iwl_fw_dbg_trigger_ba *ba_trig;
 
-	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+				     FW_DBG_TRIGGER_BA);
+	if (!trig)
 		return;
 
-	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
 	ba_trig = (void *)trig->data;
 
-	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
-					   ieee80211_vif_to_wdev(vif), trig))
-		return;
-
 	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
 		return;
......
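The debug-trigger hunks above all repeat one refactor: the old three-step sequence (iwl_fw_dbg_trigger_enabled, iwl_fw_dbg_get_trigger, iwl_fw_dbg_trigger_check_stop) collapses into a single iwl_fw_dbg_trigger_on() call that hands back the trigger only when it is enabled and not stopped for the given wdev. A schematic of the resulting shape, using a made-up caller and eliding the collection body:

/* Schematic of the pattern used in the hunks above; this function is
 * illustrative, not a real driver function. */
static void sketch_check_trigger(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif, u16 tid)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	/* NULL unless the trigger is enabled and may fire for this wdev. */
	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;
	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
		return;

	/* ... iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, ...); */
}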
@@ -1144,6 +1144,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 	kfree(trans_pcie->rxq);
 }
 
+static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
+					  struct iwl_rb_allocator *rba)
+{
+	spin_lock(&rba->lock);
+	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+	spin_unlock(&rba->lock);
+}
+
 /*
  * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
  *
@@ -1175,9 +1183,7 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
 	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
 		/* Move the 2 RBDs to the allocator ownership.
 		   Allocator has another 6 from pool for the request completion*/
-		spin_lock(&rba->lock);
-		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-		spin_unlock(&rba->lock);
+		iwl_pcie_rx_move_to_allocator(rxq, rba);
 
 		atomic_inc(&rba->req_pending);
 		queue_work(rba->alloc_wq, &rba->rx_alloc);
@@ -1400,10 +1406,18 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 	IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
 
 	while (i != r) {
+		struct iwl_rb_allocator *rba = &trans_pcie->rba;
 		struct iwl_rx_mem_buffer *rxb;
+		/* number of RBDs still waiting for page allocation */
+		u32 rb_pending_alloc =
+			atomic_read(&trans_pcie->rba.req_pending) *
+			RX_CLAIM_REQ_ALLOC;
 
-		if (unlikely(rxq->used_count == rxq->queue_size / 2))
+		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
+			     !emergency)) {
+			iwl_pcie_rx_move_to_allocator(rxq, rba);
 			emergency = true;
+		}
 
 		rxb = iwl_pcie_get_rxb(trans, rxq, i);
 		if (!rxb)
@@ -1425,17 +1439,13 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 			iwl_pcie_rx_allocator_get(trans, rxq);
 
 			if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
-				struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
 				/* Add the remaining empty RBDs for allocator use */
-				spin_lock(&rba->lock);
-				list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-				spin_unlock(&rba->lock);
+				iwl_pcie_rx_move_to_allocator(rxq, rba);
 			} else if (emergency) {
 				count++;
 				if (count == 8) {
 					count = 0;
-					if (rxq->used_count < rxq->queue_size / 3)
+					if (rb_pending_alloc < rxq->queue_size / 3)
 						emergency = false;
 
 					rxq->read = i;
......
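In the RX path above, the emergency heuristic now keys off how many RBDs are still owned by the page allocator rather than this queue's used_count: each pending allocator request accounts for RX_CLAIM_REQ_ALLOC buffers, so the product approximates the buffers not yet refilled. A tiny standalone illustration of that threshold arithmetic; the RX_CLAIM_REQ_ALLOC value and queue size below are assumed example numbers, not read from the driver:

#include <stdio.h>

/* Example values only; in the driver RX_CLAIM_REQ_ALLOC is a macro and
 * the queue size comes from the rxq. */
#define RX_CLAIM_REQ_ALLOC_EXAMPLE 8

int main(void)
{
	unsigned int req_pending = 32;	/* allocator requests in flight */
	unsigned int queue_size = 512;	/* RX queue depth */
	unsigned int rb_pending_alloc =
		req_pending * RX_CLAIM_REQ_ALLOC_EXAMPLE;

	/* Emergency mode starts once half the queue waits on pages. */
	printf("pending=%u threshold=%u emergency=%d\n",
	       rb_pending_alloc, queue_size / 2,
	       rb_pending_alloc >= queue_size / 2);
	return 0;
}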
@@ -931,7 +931,7 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
 	else
 		IWL_WARN(trans, "PCI should have external buffer debug\n");
 
-	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
+	for (i = 0; i < trans->dbg_n_dest_reg; i++) {
 		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
 		u32 val = le32_to_cpu(dest->reg_ops[i].val);
......
@@ -438,6 +438,8 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
 			return -ENOMEM;
 		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
 					      skb_frag_size(frag));
+		if (tb_idx < 0)
+			return tb_idx;
 
 		out_meta->tbs |= BIT(tb_idx);
 	}
......
@@ -2013,6 +2013,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 			return -EINVAL;
 		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
 						skb_frag_size(frag), false);
+		if (tb_idx < 0)
+			return tb_idx;
 
 		out_meta->tbs |= BIT(tb_idx);
 	}
......
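The last two hunks fix the same latent bug in both TX paths: iwl_pcie_gen2_set_tb() and iwl_pcie_txq_build_tfd() can return a negative errno, which previously flowed straight into BIT(tb_idx). A minimal sketch of the propagation pattern, with hypothetical names:

/* Illustration of the error handling added above; names are made up. */
static int sketch_add_frag(unsigned long *tb_bitmap, int tb_idx)
{
	if (tb_idx < 0)		/* set_tb/build_tfd failed */
		return tb_idx;	/* propagate errno instead of BIT(-errno) */

	*tb_bitmap |= BIT(tb_idx);
	return 0;
}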