Commit 1f370650 authored by Sara Sharon, committed by Luca Coelho

iwlwifi: mvm: support unification of INIT and RT images

For a000 devices the INIT and RT images are unified into one
image.
The changes in the flow are the following:
* The driver loads only the RT firmware, meaning that the NVM access
  command is sent during the RT image load flow.
* A new command (NVM_ACCESS_COMPLETE) now signals to the FW that
  the driver is done accessing the NVM, so the FW can proceed with
  phy calibrations.
* The phy DB is no longer sent from the INIT FW to be restored by the
  driver for the RT FW - all of the phy DB is now internal to the FW.
  INIT complete now follows the NVM access command, with no phy DB
  calls before it.
* The paging command is sent earlier in the flow, before NVM access,
  to enable a complete load of the FW.
* Caution must be taken when restart is called, since we may not have
  completed the init flow even though we are in the RT firmware.
Signed-off-by: Sara Sharon <sara.sharon@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent 143b0b2a
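
For orientation, here is a condensed sketch of the unified load flow described in the commit message above. It is trimmed from iwl_run_unified_mvm_ucode() in the diff that follows; the wrapper function name is illustrative only, and the error logging and the INIT-complete notification wait are omitted for brevity.

/*
 * Condensed, illustrative sketch of the unified INIT+RT load flow.
 * Not part of the patch: the real implementation is
 * iwl_run_unified_mvm_ucode() below, which also arms a notification
 * wait for INIT_COMPLETE_NOTIF and logs errors.
 */
static int unified_load_flow_sketch(struct iwl_mvm *mvm, bool read_nvm)
{
        struct iwl_nvm_access_complete_cmd nvm_complete = {};
        int ret;

        /* 1. Load the single (RT) image and wait for the ALIVE notification */
        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
        if (ret)
                return ret;

        /* 2. Configure FW paging before any NVM access */
        ret = iwl_mvm_init_paging(mvm);
        if (ret)
                return ret;

        /* 3. Read the NVM through the RT image (only once, at driver load) */
        if (read_nvm) {
                ret = iwl_nvm_init(mvm, true);
                if (ret)
                        return ret;
        }

        /* 4. Tell the FW the NVM access is done so it can start phy calibrations */
        return iwl_mvm_send_cmd_pdu(mvm,
                                    WIDE_ID(REGULATORY_AND_NVM_GROUP,
                                            NVM_ACCESS_COMPLETE),
                                    0, sizeof(nvm_complete), &nvm_complete);
}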
@@ -341,6 +341,10 @@ enum iwl_prot_offload_subcmd_ids {
 	STORED_BEACON_NTF = 0xFF,
 };
 
+enum iwl_regulatory_and_nvm_subcmd_ids {
+	NVM_ACCESS_COMPLETE = 0x0,
+};
+
 enum iwl_fmac_debug_cmds {
 	LMAC_RD_WR = 0x0,
 	UMAC_RD_WR = 0x1,
@@ -355,6 +359,7 @@ enum {
 	PHY_OPS_GROUP = 0x4,
 	DATA_PATH_GROUP = 0x5,
 	PROT_OFFLOAD_GROUP = 0xb,
+	REGULATORY_AND_NVM_GROUP = 0xc,
 	DEBUG_GROUP = 0xf,
 };
 
@@ -2200,4 +2205,11 @@ struct iwl_dbg_mem_access_rsp {
 	__le32 data[];
 } __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
 
+/**
+ * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
+ */
+struct iwl_nvm_access_complete_cmd {
+	__le32 reserved;
+} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+
 #endif /* __fw_api_h__ */
@@ -522,6 +522,14 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
 	return true;
 }
 
+static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
+				   struct iwl_rx_packet *pkt, void *data)
+{
+	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
+
+	return true;
+}
+
 static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
 				  struct iwl_rx_packet *pkt, void *data)
 {
@@ -537,6 +545,48 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
 	return false;
 }
 
+static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
+{
+	const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
+	int ret;
+
+	/*
+	 * Configure and operate fw paging mechanism.
+	 * The driver configures the paging flow only once.
+	 * The CPU2 paging image is included in the IWL_UCODE_INIT image.
+	 */
+	if (!fw->paging_mem_size)
+		return 0;
+
+	/*
+	 * When dma is not enabled, the driver needs to copy / write
+	 * the downloaded / uploaded page to / from the smem.
+	 * This gets the location of the place were the pages are
+	 * stored.
+	 */
+	if (!is_device_dma_capable(mvm->trans->dev)) {
+		ret = iwl_trans_get_paging_item(mvm);
+		if (ret) {
+			IWL_ERR(mvm, "failed to get FW paging item\n");
+			return ret;
+		}
+	}
+
+	ret = iwl_save_fw_paging(mvm, fw);
+	if (ret) {
+		IWL_ERR(mvm, "failed to save the FW paging image\n");
+		return ret;
+	}
+
+	ret = iwl_send_paging_cmd(mvm, fw);
+	if (ret) {
+		IWL_ERR(mvm, "failed to send the paging cmd\n");
+		iwl_free_fw_paging(mvm);
+		return ret;
+	}
+
+	return 0;
+}
+
 static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 					 enum iwl_ucode_type ucode_type)
 {
@@ -607,40 +657,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 
 	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 
-	/*
-	 * configure and operate fw paging mechanism.
-	 * driver configures the paging flow only once, CPU2 paging image
-	 * included in the IWL_UCODE_INIT image.
-	 */
-	if (fw->paging_mem_size) {
-		/*
-		 * When dma is not enabled, the driver needs to copy / write
-		 * the downloaded / uploaded page to / from the smem.
-		 * This gets the location of the place were the pages are
-		 * stored.
-		 */
-		if (!is_device_dma_capable(mvm->trans->dev)) {
-			ret = iwl_trans_get_paging_item(mvm);
-			if (ret) {
-				IWL_ERR(mvm, "failed to get FW paging item\n");
-				return ret;
-			}
-		}
-
-		ret = iwl_save_fw_paging(mvm, fw);
-		if (ret) {
-			IWL_ERR(mvm, "failed to save the FW paging image\n");
-			return ret;
-		}
-
-		ret = iwl_send_paging_cmd(mvm, fw);
-		if (ret) {
-			IWL_ERR(mvm, "failed to send the paging cmd\n");
-			iwl_free_fw_paging(mvm);
-			return ret;
-		}
-	}
-
 	/*
 	 * Note: all the queues are enabled as part of the interface
 	 * initialization, but in firmware restart scenarios they
@@ -798,6 +814,75 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 	return ret;
 }
 
+int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+	struct iwl_notification_wait init_wait;
+	struct iwl_nvm_access_complete_cmd nvm_complete = {};
+	static const u16 init_complete[] = {
+		INIT_COMPLETE_NOTIF,
+	};
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	iwl_init_notification_wait(&mvm->notif_wait,
+				   &init_wait,
+				   init_complete,
+				   ARRAY_SIZE(init_complete),
+				   iwl_wait_init_complete,
+				   NULL);
+
+	/* Will also start the device */
+	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+		goto error;
+	}
+
+	/* TODO: remove when integrating context info */
+	ret = iwl_mvm_init_paging(mvm);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to init paging: %d\n",
+			ret);
+		goto error;
+	}
+
+	/* Read the NVM only at driver load time, no need to do this twice */
+	if (read_nvm) {
+		/* Read nvm */
+		ret = iwl_nvm_init(mvm, true);
+		if (ret) {
+			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+			goto error;
+		}
+	}
+
+	/* In case we read the NVM from external file, load it to the NIC */
+	if (mvm->nvm_file_name)
+		iwl_mvm_load_nvm_to_nic(mvm);
+
+	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
+	if (WARN_ON(ret))
+		goto error;
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+						NVM_ACCESS_COMPLETE), 0,
+				   sizeof(nvm_complete), &nvm_complete);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
+			ret);
+		goto error;
+	}
+
+	/* We wait for the INIT complete notification */
+	return iwl_wait_notification(&mvm->notif_wait, &init_wait,
+				     MVM_UCODE_ALIVE_TIMEOUT);
+
+error:
+	iwl_remove_notification(&mvm->notif_wait, &init_wait);
+	return ret;
+}
+
 static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
 					  struct iwl_rx_packet *pkt)
 {
@@ -1058,23 +1143,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
 	return ret;
 }
 
-int iwl_mvm_up(struct iwl_mvm *mvm)
+static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
 {
-	int ret, i;
-	struct ieee80211_channel *chan;
-	struct cfg80211_chan_def chandef;
-
-	lockdep_assert_held(&mvm->mutex);
+	int ret;
 
-	ret = iwl_trans_start_hw(mvm->trans);
-	if (ret)
-		return ret;
+	if (iwl_mvm_has_new_tx_api(mvm))
+		return iwl_run_unified_mvm_ucode(mvm, false);
 
-	/*
-	 * If we haven't completed the run of the init ucode during
-	 * module loading, load init ucode now
-	 * (for example, if we were in RFKILL)
-	 */
 	ret = iwl_run_init_mvm_ucode(mvm, false);
+
 	if (iwlmvm_mod_params.init_dbg)
@@ -1085,7 +1160,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 		/* this can't happen */
 		if (WARN_ON(ret > 0))
 			ret = -ERFKILL;
-		goto error;
+		return ret;
 	}
 
 	/*
@@ -1096,9 +1171,28 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 	_iwl_trans_stop_device(mvm->trans, false);
 	ret = _iwl_trans_start_hw(mvm->trans, false);
 	if (ret)
-		goto error;
+		return ret;
 
 	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+	if (ret)
+		return ret;
+
+	return iwl_mvm_init_paging(mvm);
+}
+
+int iwl_mvm_up(struct iwl_mvm *mvm)
+{
+	int ret, i;
+	struct ieee80211_channel *chan;
+	struct cfg80211_chan_def chandef;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	ret = iwl_trans_start_hw(mvm->trans);
+	if (ret)
+		return ret;
+
+	ret = iwl_mvm_load_rt_fw(mvm);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
 		goto error;
@@ -1125,13 +1219,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 		goto error;
 
 	/* Send phy db control command and then phy db calibration*/
-	ret = iwl_send_phy_db_data(mvm->phy_db);
-	if (ret)
-		goto error;
+	if (!iwl_mvm_has_new_tx_api(mvm)) {
+		ret = iwl_send_phy_db_data(mvm->phy_db);
+		if (ret)
+			goto error;
 
-	ret = iwl_send_phy_cfg_cmd(mvm);
-	if (ret)
-		goto error;
+		ret = iwl_send_phy_cfg_cmd(mvm);
+		if (ret)
+			goto error;
+	}
 
 	/* Init RSS configuration */
 	if (iwl_mvm_has_new_rx_api(mvm)) {
...
@@ -1210,8 +1210,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 	/* the fw is stopped, the aux sta is dead: clean up driver state */
 	iwl_mvm_del_aux_sta(mvm);
 
-	iwl_free_fw_paging(mvm);
-
 	/*
 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
 	 * won't be called in this case).
...
@@ -739,6 +739,7 @@ struct iwl_mvm {
 
 	enum iwl_ucode_type cur_ucode;
 	bool ucode_loaded;
+	bool hw_registered;
 	bool calibrating;
 	u32 error_event_table;
 	u32 log_event_table;
@@ -1257,6 +1258,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
  ******************/
 /* uCode */
 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
+int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
 
 /* Utils */
 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
@@ -1686,6 +1688,7 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 
 static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 {
+	iwl_free_fw_paging(mvm);
 	mvm->ucode_loaded = false;
 	iwl_trans_stop_device(mvm->trans);
 }
...
@@ -466,6 +466,13 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
 	HCMD_NAME(STORED_BEACON_NTF),
 };
 
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
+	HCMD_NAME(NVM_ACCESS_COMPLETE),
+};
+
 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
 	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
 	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
@@ -474,6 +481,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
 	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
 	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
 	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
+	[REGULATORY_AND_NVM_GROUP] =
+		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
 };
 
 /* this forward declaration can avoid to export the function */
@@ -597,7 +606,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 		mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
 	}
 	mvm->sf_state = SF_UNINIT;
-	mvm->cur_ucode = IWL_UCODE_INIT;
+	if (iwl_mvm_has_new_tx_api(mvm))
+		mvm->cur_ucode = IWL_UCODE_REGULAR;
+	else
+		mvm->cur_ucode = IWL_UCODE_INIT;
 	mvm->drop_bcn_ap_mode = true;
 
 	mutex_init(&mvm->mutex);
@@ -720,7 +732,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 	mutex_lock(&mvm->mutex);
 	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
-	err = iwl_run_init_mvm_ucode(mvm, true);
+	if (iwl_mvm_has_new_tx_api(mvm))
+		err = iwl_run_unified_mvm_ucode(mvm, true);
+	else
+		err = iwl_run_init_mvm_ucode(mvm, true);
 	if (!err || !iwlmvm_mod_params.init_dbg)
 		iwl_mvm_stop_device(mvm);
 	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@@ -743,6 +758,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	err = iwl_mvm_mac_setup_register(mvm);
 	if (err)
 		goto out_free;
+	mvm->hw_registered = true;
 
 	min_backoff = calc_min_backoff(trans, cfg);
 	iwl_mvm_thermal_initialize(mvm, min_backoff);
@@ -764,6 +780,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
  out_unregister:
 	ieee80211_unregister_hw(mvm->hw);
+	mvm->hw_registered = false;
 	iwl_mvm_leds_exit(mvm);
 	iwl_mvm_thermal_exit(mvm);
  out_free:
@@ -1192,7 +1209,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
 		reprobe->dev = mvm->trans->dev;
 		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
 		schedule_work(&reprobe->work);
-	} else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
+	} else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
+		   mvm->hw_registered) {
 		/* don't let the transport/FW power down */
 		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
...