Commit 8f1e5d31 authored by John W. Linville

Merge branch 'for-linville' of git://github.com/kvalo/ath

parents 57afc62e 08b8aa09
@@ -201,6 +201,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
\
addr = host_interest_item_address(HI_ITEM(item)); \
ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
+if (!ret) \
*val = __le32_to_cpu(tmp); \
ret; \
})
......
@@ -329,6 +329,33 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
return ret;
}
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
+{
+struct ath10k *ar = pipe->ar;
+struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+struct ath10k_ce_ring *src_ring = pipe->src_ring;
+u32 ctrl_addr = pipe->ctrl_addr;
+lockdep_assert_held(&ar_pci->ce_lock);
+/*
+* This function must be called only if there is an incomplete
+* scatter-gather transfer (before index register is updated)
+* that needs to be cleaned up.
+*/
+if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
+return;
+if (WARN_ON_ONCE(src_ring->write_index ==
+ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
+return;
+src_ring->write_index--;
+src_ring->write_index &= src_ring->nentries_mask;
+src_ring->per_transfer_context[src_ring->write_index] = NULL;
+}
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
......
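The new __ath10k_ce_send_revert() above undoes the most recent software enqueue on a copy-engine source ring by stepping write_index back one slot (modulo the power-of-two ring size) and dropping the per-transfer context. A minimal standalone sketch of that masked ring-index arithmetic (plain C with a hypothetical ring type, not the driver code):

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8                       /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct ring {
	void *ctx[RING_SIZE];             /* per-transfer context slots */
	unsigned int write_index;         /* sw producer index */
	unsigned int sw_index;            /* sw consumer index */
};

/* Enqueue: claim the slot at write_index, then advance it with wrap-around. */
static void ring_enqueue(struct ring *r, void *ctx)
{
	r->ctx[r->write_index] = ctx;
	r->write_index = (r->write_index + 1) & RING_MASK;
}

/* Revert: step write_index back one slot and drop its context, mirroring
 * what __ath10k_ce_send_revert() does for an incomplete scatter-gather
 * transfer. Only legal while something is actually enqueued. */
static void ring_revert(struct ring *r)
{
	assert(r->write_index != r->sw_index);
	r->write_index = (r->write_index - 1) & RING_MASK;
	r->ctx[r->write_index] = NULL;
}

int main(void)
{
	struct ring r = { .write_index = 0, .sw_index = 0 };
	int a = 1, b = 2;

	ring_enqueue(&r, &a);
	ring_enqueue(&r, &b);
	ring_revert(&r);                  /* undoes the enqueue of &b */
	printf("write_index = %u\n", r.write_index);   /* prints 1 */
	return 0;
}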
@@ -160,6 +160,8 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
unsigned int transfer_id,
unsigned int flags);
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
......
@@ -58,36 +58,6 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
complete(&ar->target_suspend);
}
-static int ath10k_init_connect_htc(struct ath10k *ar)
-{
-int status;
-status = ath10k_wmi_connect_htc_service(ar);
-if (status)
-goto conn_fail;
-/* Start HTC */
-status = ath10k_htc_start(&ar->htc);
-if (status)
-goto conn_fail;
-/* Wait for WMI event to be ready */
-status = ath10k_wmi_wait_for_service_ready(ar);
-if (status <= 0) {
-ath10k_warn("wmi service ready event not received");
-status = -ETIMEDOUT;
-goto timeout;
-}
-ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
-return 0;
-timeout:
-ath10k_htc_stop(&ar->htc);
-conn_fail:
-return status;
-}
static int ath10k_init_configure_target(struct ath10k *ar)
{
u32 param_host;
@@ -681,7 +651,8 @@ static void ath10k_core_restart(struct work_struct *work)
switch (ar->state) {
case ATH10K_STATE_ON:
ar->state = ATH10K_STATE_RESTARTING;
-ath10k_halt(ar);
+del_timer_sync(&ar->scan.timeout);
+ath10k_reset_scan((unsigned long)ar);
ieee80211_restart_hw(ar->hw);
break;
case ATH10K_STATE_OFF:
@@ -690,6 +661,8 @@ static void ath10k_core_restart(struct work_struct *work)
ath10k_warn("cannot restart a device that hasn't been started\n");
break;
case ATH10K_STATE_RESTARTING:
+/* hw restart might be requested from multiple places */
+break;
case ATH10K_STATE_RESTARTED:
ar->state = ATH10K_STATE_WEDGED;
/* fall through */
@@ -701,70 +674,6 @@ static void ath10k_core_restart(struct work_struct *work)
mutex_unlock(&ar->conf_mutex);
}
-struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
-const struct ath10k_hif_ops *hif_ops)
-{
-struct ath10k *ar;
-ar = ath10k_mac_create();
-if (!ar)
-return NULL;
-ar->ath_common.priv = ar;
-ar->ath_common.hw = ar->hw;
-ar->p2p = !!ath10k_p2p;
-ar->dev = dev;
-ar->hif.priv = hif_priv;
-ar->hif.ops = hif_ops;
-init_completion(&ar->scan.started);
-init_completion(&ar->scan.completed);
-init_completion(&ar->scan.on_channel);
-init_completion(&ar->target_suspend);
-init_completion(&ar->install_key_done);
-init_completion(&ar->vdev_setup_done);
-setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
-ar->workqueue = create_singlethread_workqueue("ath10k_wq");
-if (!ar->workqueue)
-goto err_wq;
-mutex_init(&ar->conf_mutex);
-spin_lock_init(&ar->data_lock);
-INIT_LIST_HEAD(&ar->peers);
-init_waitqueue_head(&ar->peer_mapping_wq);
-init_completion(&ar->offchan_tx_completed);
-INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
-skb_queue_head_init(&ar->offchan_tx_queue);
-INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
-skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
-INIT_WORK(&ar->restart_work, ath10k_core_restart);
-return ar;
-err_wq:
-ath10k_mac_destroy(ar);
-return NULL;
-}
-EXPORT_SYMBOL(ath10k_core_create);
-void ath10k_core_destroy(struct ath10k *ar)
-{
-flush_workqueue(ar->workqueue);
-destroy_workqueue(ar->workqueue);
-ath10k_mac_destroy(ar);
-}
-EXPORT_SYMBOL(ath10k_core_destroy);
int ath10k_core_start(struct ath10k *ar)
{
int status;
@@ -805,10 +714,28 @@ int ath10k_core_start(struct ath10k *ar)
goto err;
}
+status = ath10k_htt_init(ar);
+if (status) {
+ath10k_err("failed to init htt: %d\n", status);
+goto err_wmi_detach;
+}
+status = ath10k_htt_tx_alloc(&ar->htt);
+if (status) {
+ath10k_err("failed to alloc htt tx: %d\n", status);
+goto err_wmi_detach;
+}
+status = ath10k_htt_rx_alloc(&ar->htt);
+if (status) {
+ath10k_err("failed to alloc htt rx: %d\n", status);
+goto err_htt_tx_detach;
+}
status = ath10k_hif_start(ar);
if (status) {
ath10k_err("could not start HIF: %d\n", status);
-goto err_wmi_detach;
+goto err_htt_rx_detach;
}
status = ath10k_htc_wait_target(&ar->htc);
@@ -817,15 +744,30 @@ int ath10k_core_start(struct ath10k *ar)
goto err_hif_stop;
}
-status = ath10k_htt_attach(ar);
+status = ath10k_htt_connect(&ar->htt);
if (status) {
-ath10k_err("could not attach htt (%d)\n", status);
+ath10k_err("failed to connect htt (%d)\n", status);
goto err_hif_stop;
}
-status = ath10k_init_connect_htc(ar);
-if (status)
-goto err_htt_detach;
+status = ath10k_wmi_connect(ar);
+if (status) {
+ath10k_err("could not connect wmi: %d\n", status);
+goto err_hif_stop;
+}
+status = ath10k_htc_start(&ar->htc);
+if (status) {
+ath10k_err("failed to start htc: %d\n", status);
+goto err_hif_stop;
+}
+status = ath10k_wmi_wait_for_service_ready(ar);
+if (status <= 0) {
+ath10k_warn("wmi service ready event not received");
+status = -ETIMEDOUT;
+goto err_htc_stop;
+}
ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n",
ar->hw->wiphy->fw_version);
@@ -833,23 +775,25 @@ int ath10k_core_start(struct ath10k *ar)
status = ath10k_wmi_cmd_init(ar);
if (status) {
ath10k_err("could not send WMI init command (%d)\n", status);
-goto err_disconnect_htc;
+goto err_htc_stop;
}
status = ath10k_wmi_wait_for_unified_ready(ar);
if (status <= 0) {
ath10k_err("wmi unified ready event not received\n");
status = -ETIMEDOUT;
-goto err_disconnect_htc;
+goto err_htc_stop;
}
-status = ath10k_htt_attach_target(&ar->htt);
-if (status)
-goto err_disconnect_htc;
+status = ath10k_htt_setup(&ar->htt);
+if (status) {
+ath10k_err("failed to setup htt: %d\n", status);
+goto err_htc_stop;
+}
status = ath10k_debug_start(ar);
if (status)
-goto err_disconnect_htc;
+goto err_htc_stop;
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
INIT_LIST_HEAD(&ar->arvifs);
@@ -868,12 +812,14 @@ int ath10k_core_start(struct ath10k *ar)
return 0;
-err_disconnect_htc:
+err_htc_stop:
ath10k_htc_stop(&ar->htc);
-err_htt_detach:
-ath10k_htt_detach(&ar->htt);
err_hif_stop:
ath10k_hif_stop(ar);
+err_htt_rx_detach:
+ath10k_htt_rx_free(&ar->htt);
+err_htt_tx_detach:
+ath10k_htt_tx_free(&ar->htt);
err_wmi_detach:
ath10k_wmi_detach(ar);
err:
@@ -913,7 +859,9 @@ void ath10k_core_stop(struct ath10k *ar)
ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
-ath10k_htt_detach(&ar->htt);
+ath10k_hif_stop(ar);
+ath10k_htt_tx_free(&ar->htt);
+ath10k_htt_rx_free(&ar->htt);
ath10k_wmi_detach(ar);
}
EXPORT_SYMBOL(ath10k_core_stop);
@@ -1005,22 +953,15 @@ static int ath10k_core_check_chip_id(struct ath10k *ar)
return 0;
}
-int ath10k_core_register(struct ath10k *ar, u32 chip_id)
+static void ath10k_core_register_work(struct work_struct *work)
{
+struct ath10k *ar = container_of(work, struct ath10k, register_work);
int status;
-ar->chip_id = chip_id;
-status = ath10k_core_check_chip_id(ar);
-if (status) {
-ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
-return status;
-}
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err("could not probe fw (%d)\n", status);
-return status;
+goto err;
}
status = ath10k_mac_register(ar);
@@ -1035,18 +976,43 @@ int ath10k_core_register(struct ath10k *ar, u32 chip_id)
goto err_unregister_mac;
}
-return 0;
+set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
+return;
err_unregister_mac:
ath10k_mac_unregister(ar);
err_release_fw:
ath10k_core_free_firmware_files(ar);
+err:
+device_release_driver(ar->dev);
+return;
+}
+int ath10k_core_register(struct ath10k *ar, u32 chip_id)
+{
+int status;
+ar->chip_id = chip_id;
+status = ath10k_core_check_chip_id(ar);
+if (status) {
+ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
return status;
+}
+queue_work(ar->workqueue, &ar->register_work);
+return 0;
}
EXPORT_SYMBOL(ath10k_core_register);
void ath10k_core_unregister(struct ath10k *ar)
{
+cancel_work_sync(&ar->register_work);
+if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+return;
/* We must unregister from mac80211 before we stop HTC and HIF.
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
@@ -1058,6 +1024,71 @@ void ath10k_core_unregister(struct ath10k *ar)
}
EXPORT_SYMBOL(ath10k_core_unregister);
+struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
+const struct ath10k_hif_ops *hif_ops)
+{
+struct ath10k *ar;
+ar = ath10k_mac_create();
+if (!ar)
+return NULL;
+ar->ath_common.priv = ar;
+ar->ath_common.hw = ar->hw;
+ar->p2p = !!ath10k_p2p;
+ar->dev = dev;
+ar->hif.priv = hif_priv;
+ar->hif.ops = hif_ops;
+init_completion(&ar->scan.started);
+init_completion(&ar->scan.completed);
+init_completion(&ar->scan.on_channel);
+init_completion(&ar->target_suspend);
+init_completion(&ar->install_key_done);
+init_completion(&ar->vdev_setup_done);
+setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
+ar->workqueue = create_singlethread_workqueue("ath10k_wq");
+if (!ar->workqueue)
+goto err_wq;
+mutex_init(&ar->conf_mutex);
+spin_lock_init(&ar->data_lock);
+INIT_LIST_HEAD(&ar->peers);
+init_waitqueue_head(&ar->peer_mapping_wq);
+init_completion(&ar->offchan_tx_completed);
+INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
+skb_queue_head_init(&ar->offchan_tx_queue);
+INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
+skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+INIT_WORK(&ar->register_work, ath10k_core_register_work);
+INIT_WORK(&ar->restart_work, ath10k_core_restart);
+return ar;
+err_wq:
+ath10k_mac_destroy(ar);
+return NULL;
+}
+EXPORT_SYMBOL(ath10k_core_create);
+void ath10k_core_destroy(struct ath10k *ar)
+{
+flush_workqueue(ar->workqueue);
+destroy_workqueue(ar->workqueue);
+ath10k_mac_destroy(ar);
+}
+EXPORT_SYMBOL(ath10k_core_destroy);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
MODULE_LICENSE("Dual BSD/GPL");
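The reshuffled ath10k_core_start() above is a textbook instance of the kernel's goto-based error-unwind idiom: every successfully initialised resource gets its own label, and a failure jumps to the label that tears down everything acquired so far, in reverse order. A minimal standalone sketch of the idiom (hypothetical step names, not the driver code):

#include <stdio.h>

/* Hypothetical init steps; each returns 0 on success. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }      /* pretend this one fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int start(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err;           /* nothing to undo yet */

	ret = step_b();
	if (ret)
		goto err_undo_a;    /* only A was set up */

	ret = step_c();
	if (ret)
		goto err_undo_b;    /* B and A must be rolled back */

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();                   /* labels fall through in reverse order */
err:
	return ret;
}

int main(void)
{
	printf("start() = %d\n", start());
	return 0;
}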
@@ -335,6 +335,7 @@ enum ath10k_dev_flags {
/* Indicates that ath10k device is during CAC phase of DFS */
ATH10K_CAC_RUNNING,
ATH10K_FLAG_FIRST_BOOT_DONE,
+ATH10K_FLAG_CORE_REGISTERED,
};
struct ath10k {
@@ -440,6 +441,12 @@ struct ath10k {
bool radar_enabled;
int num_started_vdevs;
+/* Protected by conf-mutex */
+u8 supp_tx_chainmask;
+u8 supp_rx_chainmask;
+u8 cfg_tx_chainmask;
+u8 cfg_rx_chainmask;
struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done;
@@ -470,6 +477,7 @@ struct ath10k {
enum ath10k_state state;
+struct work_struct register_work;
struct work_struct restart_work;
/* cycle count is reported twice for each visited channel during scan.
......
@@ -830,17 +830,11 @@ int ath10k_htc_start(struct ath10k_htc *htc)
return 0;
}
-/*
-* stop HTC communications, i.e. stop interrupt reception, and flush all
-* queued buffers
-*/
void ath10k_htc_stop(struct ath10k_htc *htc)
{
spin_lock_bh(&htc->tx_lock);
htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
-ath10k_hif_stop(htc->ar);
}
/* registered target arrival callback from the HIF layer */
......
@@ -22,7 +22,7 @@
#include "core.h"
#include "debug.h"
-static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
+int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
@@ -48,38 +48,13 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
return 0;
}
-int ath10k_htt_attach(struct ath10k *ar)
+int ath10k_htt_init(struct ath10k *ar)
{
struct ath10k_htt *htt = &ar->htt;
-int ret;
htt->ar = ar;
htt->max_throughput_mbps = 800;
-/*
-* Connect to HTC service.
-* This has to be done before calling ath10k_htt_rx_attach,
-* since ath10k_htt_rx_attach involves sending a rx ring configure
-* message to the target.
-*/
-ret = ath10k_htt_htc_attach(htt);
-if (ret) {
-ath10k_err("could not attach htt htc (%d)\n", ret);
-goto err_htc_attach;
-}
-ret = ath10k_htt_tx_attach(htt);
-if (ret) {
-ath10k_err("could not attach htt tx (%d)\n", ret);
-goto err_htc_attach;
-}
-ret = ath10k_htt_rx_attach(htt);
-if (ret) {
-ath10k_err("could not attach htt rx (%d)\n", ret);
-goto err_rx_attach;
-}
/*
* Prefetch enough data to satisfy target
* classification engine.
@@ -93,11 +68,6 @@ int ath10k_htt_attach(struct ath10k *ar)
2; /* ip4 dscp or ip6 priority */
return 0;
-err_rx_attach:
-ath10k_htt_tx_detach(htt);
-err_htc_attach:
-return ret;
}
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
@@ -117,7 +87,7 @@ static int ath10k_htt_verify_version(struct ath10k_htt *htt)
return 0;
}
-int ath10k_htt_attach_target(struct ath10k_htt *htt)
+int ath10k_htt_setup(struct ath10k_htt *htt)
{
int status;
@@ -140,9 +110,3 @@ int ath10k_htt_attach_target(struct ath10k_htt *htt)
return ath10k_htt_send_rx_ring_cfg_ll(htt);
}
-void ath10k_htt_detach(struct ath10k_htt *htt)
-{
-ath10k_htt_rx_detach(htt);
-ath10k_htt_tx_detach(htt);
-}
@@ -1328,14 +1328,16 @@ struct htt_rx_desc {
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
-int ath10k_htt_attach(struct ath10k *ar);
-int ath10k_htt_attach_target(struct ath10k_htt *htt);
-void ath10k_htt_detach(struct ath10k_htt *htt);
-int ath10k_htt_tx_attach(struct ath10k_htt *htt);
-void ath10k_htt_tx_detach(struct ath10k_htt *htt);
-int ath10k_htt_rx_attach(struct ath10k_htt *htt);
-void ath10k_htt_rx_detach(struct ath10k_htt *htt);
+int ath10k_htt_connect(struct ath10k_htt *htt);
+int ath10k_htt_init(struct ath10k *ar);
+int ath10k_htt_setup(struct ath10k_htt *htt);
+int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
+void ath10k_htt_tx_free(struct ath10k_htt *htt);
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
+void ath10k_htt_rx_free(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
......
@@ -225,10 +225,26 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
ath10k_htt_rx_msdu_buff_replenish(htt);
}
-void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
{
-int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+struct sk_buff *skb;
+int i;
+for (i = 0; i < htt->rx_ring.size; i++) {
+skb = htt->rx_ring.netbufs_ring[i];
+if (!skb)
+continue;
+dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
+skb->len + skb_tailroom(skb),
+DMA_FROM_DEVICE);
+dev_kfree_skb_any(skb);
+htt->rx_ring.netbufs_ring[i] = NULL;
+}
+}
+void ath10k_htt_rx_free(struct ath10k_htt *htt)
+{
del_timer_sync(&htt->rx_ring.refill_retry_timer);
tasklet_kill(&htt->rx_replenish_task);
tasklet_kill(&htt->txrx_compl_task);
@@ -236,18 +252,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
skb_queue_purge(&htt->tx_compl_q);
skb_queue_purge(&htt->rx_compl_q);
-while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
-struct sk_buff *skb =
-htt->rx_ring.netbufs_ring[sw_rd_idx];
-struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
-dma_unmap_single(htt->ar->dev, cb->paddr,
-skb->len + skb_tailroom(skb),
-DMA_FROM_DEVICE);
-dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
-sw_rd_idx++;
-sw_rd_idx &= htt->rx_ring.size_mask;
-}
+ath10k_htt_rx_ring_clean_up(htt);
dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size *
@@ -277,6 +282,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx];
+htt->rx_ring.netbufs_ring[idx] = NULL;
idx++;
idx &= htt->rx_ring.size_mask;
@@ -306,6 +312,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
+bool corrupted = false;
lockdep_assert_held(&htt->rx_ring.lock);
@@ -399,7 +406,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
msdu_chained = rx_desc->frag_info.ring2_more_count;
-msdu_chaining = msdu_chained;
if (msdu_len_invalid)
msdu_len = 0;
@@ -427,11 +433,15 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu->next = next;
msdu = next;
+msdu_chaining = 1;
}
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
+if (msdu_chaining && !last_msdu)
+corrupted = true;
if (last_msdu) {
msdu->next = NULL;
break;
@@ -446,6 +456,20 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
if (*head_msdu == NULL)
msdu_chaining = -1;
+/*
+* Apparently FW sometimes reports weird chained MSDU sequences with
+* more than one rx descriptor. This seems like a bug but needs more
+* analyzing. For the time being fix it by dropping such sequences to
+* avoid blowing up the host system.
+*/
+if (corrupted) {
+ath10k_warn("failed to pop chained msdus, dropping\n");
+ath10k_htt_rx_free_msdu_chain(*head_msdu);
+*head_msdu = NULL;
+*tail_msdu = NULL;
+msdu_chaining = -EINVAL;
+}
/*
* Don't refill the ring yet.
*
@@ -468,7 +492,7 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr)
ath10k_htt_rx_msdu_buff_replenish(htt);
}
-int ath10k_htt_rx_attach(struct ath10k_htt *htt)
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
dma_addr_t paddr;
void *vaddr;
@@ -494,7 +518,7 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
htt->rx_ring.netbufs_ring =
-kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
GFP_KERNEL);
if (!htt->rx_ring.netbufs_ring)
goto err_netbuf;
@@ -754,17 +778,30 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb,
-enum htt_rx_mpdu_encrypt_type enctype)
+enum htt_rx_mpdu_encrypt_type enctype,
+enum rx_msdu_decap_format fmt,
+bool dot11frag)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-if (enctype == HTT_RX_MPDU_ENCRYPT_NONE) {
rx_status->flag &= ~(RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED);
+if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
+return;
+/*
+* There's no explicit rx descriptor flag to indicate whether a given
+* frame has been decrypted or not. We're forced to use the decap
+* format as an implicit indication. However fragmentation rx is always
+* raw and it probably never reports undecrypted raws.
+*
+* This makes sure sniffed frames are reported as-is without stripping
+* the protected flag.
+*/
+if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
return;
-}
rx_status->flag |= RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
@@ -918,7 +955,8 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
}
skb_in = skb;
-ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype);
+ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
+false);
skb = skb->next;
skb_in->next = NULL;
@@ -1000,7 +1038,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
break;
}
-ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype);
+ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);
ath10k_process_rx(htt->ar, rx_status, skb);
}
@@ -1288,6 +1326,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
}
/* FIXME: implement signal strength */
+rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
hdr = (struct ieee80211_hdr *)msdu_head->data;
rxd = (void *)msdu_head->data - sizeof(*rxd);
@@ -1306,7 +1345,8 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
-ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype);
+ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
+true);
msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
if (tkip_mic_err)
......
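The rx-ring rework above switches from walking the ring between two hardware indices to a simpler invariant: the netbufs array is zero-allocated (kzalloc), every pop NULLs its slot, so teardown can just scan the whole array and free whatever is still non-NULL. A small standalone sketch of that pattern (hypothetical buffer type, not the driver code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RING_SIZE 4

struct ring {
	char *bufs[RING_SIZE];   /* zeroed at allocation time, like kzalloc */
	unsigned int rd_idx;
};

/* Pop: take the buffer and clear the slot so cleanup never sees it again. */
static char *ring_pop(struct ring *r)
{
	char *buf = r->bufs[r->rd_idx];

	r->bufs[r->rd_idx] = NULL;
	r->rd_idx = (r->rd_idx + 1) % RING_SIZE;
	return buf;
}

/* Cleanup: scan every slot; NULL means "never filled" or "already popped". */
static void ring_clean_up(struct ring *r)
{
	for (unsigned int i = 0; i < RING_SIZE; i++) {
		if (!r->bufs[i])
			continue;
		free(r->bufs[i]);
		r->bufs[i] = NULL;
	}
}

int main(void)
{
	struct ring r;

	memset(&r, 0, sizeof(r));
	r.bufs[0] = strdup("a");
	r.bufs[1] = strdup("b");

	free(ring_pop(&r));      /* consumes "a", slot 0 becomes NULL */
	ring_clean_up(&r);       /* frees the leftover "b" only */
	return 0;
}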
@@ -83,7 +83,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
__clear_bit(msdu_id, htt->used_msdu_ids);
}
-int ath10k_htt_tx_attach(struct ath10k_htt *htt)
+int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
spin_lock_init(&htt->tx_lock);
init_waitqueue_head(&htt->empty_tx_wq);
@@ -120,7 +120,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
return 0;
}
-static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
{
struct htt_tx_done tx_done = {0};
int msdu_id;
@@ -141,9 +141,9 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
spin_unlock_bh(&htt->tx_lock);
}
-void ath10k_htt_tx_detach(struct ath10k_htt *htt)
+void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
-ath10k_htt_tx_cleanup_pending(htt);
+ath10k_htt_tx_free_pending(htt);
kfree(htt->pending_tx);
kfree(htt->used_msdu_ids);
dma_pool_destroy(htt->tx_pool);
......
@@ -54,6 +54,9 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
arg.key_cipher = WMI_CIPHER_AES_CCM;
+if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+else
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
case WLAN_CIPHER_SUITE_TKIP:
@@ -1888,8 +1891,13 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
wep_key_work);
int ret, keyidx = arvif->def_wep_key_newidx;
+mutex_lock(&arvif->ar->conf_mutex);
+if (arvif->ar->state != ATH10K_STATE_ON)
+goto unlock;
if (arvif->def_wep_key_idx == keyidx)
-return;
+goto unlock;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
arvif->vdev_id, keyidx);
@@ -1902,10 +1910,13 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
ath10k_warn("failed to update wep key index for vdev %d: %d\n",
arvif->vdev_id,
ret);
-return;
+goto unlock;
}
arvif->def_wep_key_idx = keyidx;
+unlock:
+mutex_unlock(&arvif->ar->conf_mutex);
}
static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
@@ -2286,9 +2297,19 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ath10k_tx_htt(ar, skb);
}
-/*
-* Initialize various parameters with default vaules.
-*/
+/* Must not be called with conf_mutex held as workers can use that also. */
+static void ath10k_drain_tx(struct ath10k *ar)
+{
+/* make sure rcu-protected mac80211 tx path itself is drained */
+synchronize_net();
+ath10k_offchan_tx_purge(ar);
+ath10k_mgmt_over_wmi_tx_purge(ar);
+cancel_work_sync(&ar->offchan_tx_work);
+cancel_work_sync(&ar->wmi_mgmt_tx_work);
+}
void ath10k_halt(struct ath10k *ar)
{
struct ath10k_vif *arvif;
@@ -2303,19 +2324,12 @@ void ath10k_halt(struct ath10k *ar)
}
del_timer_sync(&ar->scan.timeout);
-ath10k_offchan_tx_purge(ar);
-ath10k_mgmt_over_wmi_tx_purge(ar);
+ath10k_reset_scan((unsigned long)ar);
ath10k_peer_cleanup_all(ar);
ath10k_core_stop(ar);
ath10k_hif_power_down(ar);
spin_lock_bh(&ar->data_lock);
-if (ar->scan.in_progress) {
-del_timer(&ar->scan.timeout);
-ar->scan.in_progress = false;
-ieee80211_scan_completed(ar->hw, true);
-}
list_for_each_entry(arvif, &ar->arvifs, list) {
if (!arvif->beacon)
continue;
@@ -2329,46 +2343,125 @@ void ath10k_halt(struct ath10k *ar)
spin_unlock_bh(&ar->data_lock);
}
+static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+struct ath10k *ar = hw->priv;
+mutex_lock(&ar->conf_mutex);
+if (ar->cfg_tx_chainmask) {
+*tx_ant = ar->cfg_tx_chainmask;
+*rx_ant = ar->cfg_rx_chainmask;
+} else {
+*tx_ant = ar->supp_tx_chainmask;
+*rx_ant = ar->supp_rx_chainmask;
+}
+mutex_unlock(&ar->conf_mutex);
+return 0;
+}
+static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
+{
+int ret;
+lockdep_assert_held(&ar->conf_mutex);
+ar->cfg_tx_chainmask = tx_ant;
+ar->cfg_rx_chainmask = rx_ant;
+if ((ar->state != ATH10K_STATE_ON) &&
+(ar->state != ATH10K_STATE_RESTARTED))
+return 0;
+ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
+tx_ant);
+if (ret) {
+ath10k_warn("failed to set tx-chainmask: %d, req 0x%x\n",
+ret, tx_ant);
+return ret;
+}
+ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
+rx_ant);
+if (ret) {
+ath10k_warn("failed to set rx-chainmask: %d, req 0x%x\n",
+ret, rx_ant);
+return ret;
+}
+return 0;
+}
+static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+struct ath10k *ar = hw->priv;
+int ret;
+mutex_lock(&ar->conf_mutex);
+ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
+mutex_unlock(&ar->conf_mutex);
+return ret;
+}
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
int ret = 0;
+/*
+* This makes sense only when restarting hw. It is harmless to call
+* unconditionally. This is necessary to make sure no HTT/WMI tx
+* commands will be submitted while restarting.
+*/
+ath10k_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
-if (ar->state != ATH10K_STATE_OFF &&
-ar->state != ATH10K_STATE_RESTARTING) {
+switch (ar->state) {
+case ATH10K_STATE_OFF:
+ar->state = ATH10K_STATE_ON;
+break;
+case ATH10K_STATE_RESTARTING:
+ath10k_halt(ar);
+ar->state = ATH10K_STATE_RESTARTED;
+break;
+case ATH10K_STATE_ON:
+case ATH10K_STATE_RESTARTED:
+case ATH10K_STATE_WEDGED:
+WARN_ON(1);
ret = -EINVAL;
-goto exit;
+goto err;
}
ret = ath10k_hif_power_up(ar);
if (ret) {
ath10k_err("Could not init hif: %d\n", ret);
-ar->state = ATH10K_STATE_OFF;
-goto exit;
+goto err_off;
}
ret = ath10k_core_start(ar);
if (ret) {
ath10k_err("Could not init core: %d\n", ret);
-ath10k_hif_power_down(ar);
-ar->state = ATH10K_STATE_OFF;
-goto exit;
+goto err_power_down;
}
-if (ar->state == ATH10K_STATE_OFF)
-ar->state = ATH10K_STATE_ON;
-else if (ar->state == ATH10K_STATE_RESTARTING)
-ar->state = ATH10K_STATE_RESTARTED;
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
-if (ret)
+if (ret) {
ath10k_warn("failed to enable PMF QOS: %d\n", ret);
+goto err_core_stop;
+}
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
-if (ret)
+if (ret) {
ath10k_warn("failed to enable dynamic BW: %d\n", ret);
+goto err_core_stop;
+}
+if (ar->cfg_tx_chainmask)
+__ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
+ar->cfg_rx_chainmask);
/*
* By default FW set ARP frames ac to voice (6). In that case ARP
@@ -2384,14 +2477,25 @@ static int ath10k_start(struct ieee80211_hw *hw)
if (ret) {
ath10k_warn("failed to set arp ac override parameter: %d\n",
ret);
-goto exit;
+goto err_core_stop;
}
ar->num_started_vdevs = 0;
ath10k_regd_update(ar);
-ret = 0;
-exit:
+mutex_unlock(&ar->conf_mutex);
+return 0;
+err_core_stop:
+ath10k_core_stop(ar);
+err_power_down:
+ath10k_hif_power_down(ar);
+err_off:
+ar->state = ATH10K_STATE_OFF;
+err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -2400,19 +2504,15 @@ static void ath10k_stop(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
+ath10k_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
-if (ar->state == ATH10K_STATE_ON ||
-ar->state == ATH10K_STATE_RESTARTED ||
-ar->state == ATH10K_STATE_WEDGED)
+if (ar->state != ATH10K_STATE_OFF) {
ath10k_halt(ar);
ar->state = ATH10K_STATE_OFF;
+}
mutex_unlock(&ar->conf_mutex);
-ath10k_mgmt_over_wmi_tx_purge(ar);
-cancel_work_sync(&ar->offchan_tx_work);
-cancel_work_sync(&ar->wmi_mgmt_tx_work);
cancel_work_sync(&ar->restart_work);
}
@@ -2925,7 +3025,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->u.ap.hidden_ssid = info->hidden_ssid;
}
-if (changed & BSS_CHANGED_BSSID) {
+/*
+* Firmware manages AP self-peer internally so make sure to not create
+* it in driver. Otherwise AP self-peer deletion may timeout later.
+*/
+if (changed & BSS_CHANGED_BSSID &&
+vif->type != NL80211_IFTYPE_AP) {
if (!is_zero_ether_addr(info->bssid)) {
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d create peer %pM\n",
@@ -4142,14 +4247,6 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
fixed_nss, force_sgi);
}
-static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw,
-struct ieee80211_vif *vif,
-struct cfg80211_chan_def *chandef)
-{
-/* there's no need to do anything here. vif->csa_active is enough */
-return;
-}
static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -4253,10 +4350,11 @@ static const struct ieee80211_ops ath10k_ops = {
.set_frag_threshold = ath10k_set_frag_threshold,
.flush = ath10k_flush,
.tx_last_beacon = ath10k_tx_last_beacon,
+.set_antenna = ath10k_set_antenna,
+.get_antenna = ath10k_get_antenna,
.restart_complete = ath10k_restart_complete,
.get_survey = ath10k_get_survey,
.set_bitrate_mask = ath10k_set_bitrate_mask,
-.channel_switch_beacon = ath10k_channel_switch_beacon,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
#ifdef CONFIG_PM
@@ -4602,6 +4700,18 @@ int ath10k_mac_register(struct ath10k *ar)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
+if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+/* TODO: Have to deal with 2x2 chips if/when they come out. */
+ar->supp_tx_chainmask = TARGET_10X_TX_CHAIN_MASK;
+ar->supp_rx_chainmask = TARGET_10X_RX_CHAIN_MASK;
+} else {
+ar->supp_tx_chainmask = TARGET_TX_CHAIN_MASK;
+ar->supp_rx_chainmask = TARGET_RX_CHAIN_MASK;
+}
+ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
+ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
ar->hw->wiphy->interface_modes |=
BIT(NL80211_IFTYPE_P2P_CLIENT) |
......
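The antenna patches above follow a common driver pattern: the requested chainmask is always cached in cfg_tx/rx_chainmask under conf_mutex, pushed to the firmware only if the device is up, and re-applied from the cache in ath10k_start(). A rough standalone sketch of that "cache now, apply when powered" idea (hypothetical device struct and apply function, not the driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dev {
	bool powered;
	uint32_t cfg_tx_mask;    /* what the user asked for, 0 = unset */
	uint32_t cfg_rx_mask;
};

/* Stand-in for the firmware call; always succeeds in this sketch. */
static int hw_apply_chainmask(uint32_t tx, uint32_t rx)
{
	printf("applied tx=0x%x rx=0x%x\n", tx, rx);
	return 0;
}

/* Cache the request unconditionally; talk to the hardware only if it is up. */
static int set_antenna(struct dev *d, uint32_t tx, uint32_t rx)
{
	d->cfg_tx_mask = tx;
	d->cfg_rx_mask = rx;

	if (!d->powered)
		return 0;            /* will be applied at power-up */

	return hw_apply_chainmask(tx, rx);
}

/* Power-up path re-applies whatever configuration was cached while down. */
static int start(struct dev *d)
{
	d->powered = true;

	if (d->cfg_tx_mask)
		return hw_apply_chainmask(d->cfg_tx_mask, d->cfg_rx_mask);
	return 0;
}

int main(void)
{
	struct dev d = { 0 };

	set_antenna(&d, 0x3, 0x3);   /* cached only, device still down */
	start(&d);                   /* now pushed to the "hardware"   */
	return 0;
}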
@@ -59,6 +59,7 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
/* how long wait to wait for target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
+#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
#define QCA988X_2_0_DEVICE_ID (0x003c)
@@ -761,17 +762,21 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
-unsigned int nentries_mask = src_ring->nentries_mask;
-unsigned int sw_index = src_ring->sw_index;
-unsigned int write_index = src_ring->write_index;
-int err, i;
+unsigned int nentries_mask;
+unsigned int sw_index;
+unsigned int write_index;
+int err, i = 0;
spin_lock_bh(&ar_pci->ce_lock);
+nentries_mask = src_ring->nentries_mask;
+sw_index = src_ring->sw_index;
+write_index = src_ring->write_index;
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) < n_items)) {
err = -ENOBUFS;
-goto unlock;
+goto err;
}
for (i = 0; i < n_items - 1; i++) {
@@ -788,7 +793,7 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
items[i].transfer_id,
CE_SEND_FLAG_GATHER);
if (err)
-goto unlock;
+goto err;
}
/* `i` is equal to `n_items -1` after for() */
@@ -806,10 +811,15 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
items[i].transfer_id,
0);
if (err)
-goto unlock;
+goto err;
+spin_unlock_bh(&ar_pci->ce_lock);
+return 0;
+err:
+for (; i > 0; i--)
+__ath10k_ce_send_revert(ce_pipe);
-err = 0;
-unlock:
spin_unlock_bh(&ar_pci->ce_lock);
return err;
}
@@ -1271,6 +1281,9 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
+if (WARN_ON(!ar_pci->started))
+return;
ret = ath10k_ce_disable_interrupts(ar);
if (ret)
ath10k_warn("failed to disable CE interrupts: %d\n", ret);
@@ -1802,6 +1815,26 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
ath10k_pci_sleep(ar);
}
+/* this function effectively clears target memory controller assert line */
+static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
+{
+u32 val;
+val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+val | SOC_RESET_CONTROL_SI0_RST_MASK);
+val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+msleep(10);
+val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
+val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+msleep(10);
+}
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
int ret = 0;
@@ -1860,6 +1893,8 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
SOC_RESET_CONTROL_ADDRESS);
msleep(10);
+ath10k_pci_warm_reset_si0(ar);
/* debug */
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_CAUSE_ADDRESS);
@@ -1988,6 +2023,28 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
return ret;
}
+static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
+{
+int i, ret;
+/*
+* Sometimes warm reset succeeds after retries.
+*
+* FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
+* at first try.
+*/
+for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
+ret = __ath10k_pci_hif_power_up(ar, false);
+if (ret == 0)
+break;
+ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
+i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
+}
+return ret;
+}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
int ret;
@@ -1999,10 +2056,10 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
* preferred (and safer) way to perform a device reset is through a
* warm reset.
*
-* Warm reset doesn't always work though (notably after a firmware
-* crash) so fall back to cold reset if necessary.
+* Warm reset doesn't always work though, so falling back to cold reset
+* may be necessary.
*/
-ret = __ath10k_pci_hif_power_up(ar, false);
+ret = ath10k_pci_hif_power_up_warm(ar);
if (ret) {
ath10k_warn("failed to power up target using warm reset: %d\n",
ret);
@@ -2196,10 +2253,7 @@ static void ath10k_pci_early_irq_tasklet(unsigned long data)
if (fw_ind & FW_IND_EVENT_PENDING) {
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
fw_ind & ~FW_IND_EVENT_PENDING);
-ath10k_pci_hif_dump_area(ar);
-/* Some structures are unavailable during early boot or at
-* driver teardown so just print that the device has crashed. */
-ath10k_warn("device crashed - no diagnostics available\n");
}
ath10k_pci_sleep(ar);
@@ -2476,6 +2530,9 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (val & FW_IND_EVENT_PENDING) {
ath10k_warn("device has crashed during init\n");
+ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
+val & ~FW_IND_EVENT_PENDING);
+ath10k_pci_hif_dump_area(ar);
ret = -ECOMM;
goto out;
}
@@ -2602,18 +2659,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, ar);
-/*
-* Without any knowledge of the Host, the Target may have been reset or
-* power cycled and its Config Space may no longer reflect the PCI
-* address space that was assigned earlier by the PCI infrastructure.
-* Refresh it now.
-*/
-ret = pci_assign_resource(pdev, BAR_NUM);
-if (ret) {
-ath10k_err("failed to assign PCI space: %d\n", ret);
-goto err_ar;
-}
ret = pci_enable_device(pdev);
if (ret) {
ath10k_err("failed to enable PCI device: %d\n", ret);
@@ -2725,8 +2770,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
if (!ar_pci)
return;
-tasklet_kill(&ar_pci->msi_fw_err);
ath10k_core_unregister(ar);
ath10k_pci_free_ce(ar);
......
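ath10k_pci_hif_power_up_warm() above wraps the warm reset in a bounded retry loop, and the caller still falls back to a cold reset if every attempt fails. A compact standalone sketch of that retry-then-fallback shape (hypothetical reset functions, not the driver code):

#include <stdio.h>

#define NUM_WARM_RESET_ATTEMPTS 3

/* Stand-ins for the two reset flavours; warm reset fails twice here. */
static int warm_reset(void)
{
	static int calls;

	return (++calls < 3) ? -1 : 0;
}

static int cold_reset(void)
{
	return 0;
}

/* Retry the cheaper warm reset a bounded number of times. */
static int power_up_warm(void)
{
	int i, ret = -1;

	for (i = 0; i < NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = warm_reset();
		if (ret == 0)
			break;
		fprintf(stderr, "warm reset attempt %d of %d failed: %d\n",
			i + 1, NUM_WARM_RESET_ATTEMPTS, ret);
	}
	return ret;
}

/* Only if all warm attempts fail do we pay for the disruptive cold reset. */
static int power_up(void)
{
	int ret = power_up_warm();

	if (ret) {
		fprintf(stderr, "warm reset failed (%d), trying cold reset\n", ret);
		ret = cold_reset();
	}
	return ret;
}

int main(void)
{
	printf("power_up() = %d\n", power_up());
	return 0;
}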
@@ -639,6 +639,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
struct sk_buff *wmi_skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int len;
+u32 buf_len = skb->len;
u16 fc;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -648,6 +649,15 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
return -EINVAL;
len = sizeof(cmd->hdr) + skb->len;
+if ((ieee80211_is_action(hdr->frame_control) ||
+ieee80211_is_deauth(hdr->frame_control) ||
+ieee80211_is_disassoc(hdr->frame_control)) &&
+ieee80211_has_protected(hdr->frame_control)) {
+len += IEEE80211_CCMP_MIC_LEN;
+buf_len += IEEE80211_CCMP_MIC_LEN;
+}
len = round_up(len, 4);
wmi_skb = ath10k_wmi_alloc_skb(len);
@@ -659,7 +669,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
cmd->hdr.tx_rate = 0;
cmd->hdr.tx_power = 0;
-cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));
+cmd->hdr.buf_len = __cpu_to_le32(buf_len);
memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
memcpy(cmd->buf, skb->data, skb->len);
@@ -957,11 +967,17 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
* frames with Protected Bit set. */
if (ieee80211_has_protected(hdr->frame_control) &&
!ieee80211_is_auth(hdr->frame_control)) {
-status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
+status->flag |= RX_FLAG_DECRYPTED;
+if (!ieee80211_is_action(hdr->frame_control) &&
+!ieee80211_is_deauth(hdr->frame_control) &&
+!ieee80211_is_disassoc(hdr->frame_control)) {
+status->flag |= RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
hdr->frame_control = __cpu_to_le16(fc &
~IEEE80211_FCTL_PROTECTED);
}
+}
ath10k_dbg(ATH10K_DBG_MGMT,
"event mgmt rx skb %p len %d ftype %02x stype %02x\n",
@@ -2359,7 +2375,7 @@ void ath10k_wmi_detach(struct ath10k *ar)
ar->wmi.num_mem_chunks = 0;
}
-int ath10k_wmi_connect_htc_service(struct ath10k *ar)
+int ath10k_wmi_connect(struct ath10k *ar)
{
int status;
struct ath10k_htc_svc_conn_req conn_req;
......
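The management-TX change above grows both the allocated command and the reported buf_len by IEEE80211_CCMP_MIC_LEN (8 bytes) for protected action/deauth/disassoc frames, presumably to leave room for the CCMP MIC the firmware handles for these frames, and then rounds the command length up to a 4-byte boundary. A hypothetical standalone sketch of that length computation (the constants and flags are stand-ins, not the driver's structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define CCMP_MIC_LEN 8                 /* IEEE80211_CCMP_MIC_LEN */
#define CMD_HDR_LEN 24                 /* stand-in for sizeof(cmd->hdr) */

/* Round x up to the next multiple of a power-of-two alignment. */
static size_t round_up_pow2(size_t x, size_t align)
{
	return (x + align - 1) & ~(align - 1);
}

/* Compute the command allocation size and the buf_len reported to firmware. */
static size_t mgmt_tx_len(size_t frame_len, bool robust_mgmt,
			  bool protected_frame, size_t *buf_len)
{
	size_t len = CMD_HDR_LEN + frame_len;

	*buf_len = frame_len;

	/* Reserve room for the CCMP MIC on protected robust management
	 * frames (action/deauth/disassoc) in both lengths. */
	if (robust_mgmt && protected_frame) {
		len += CCMP_MIC_LEN;
		*buf_len += CCMP_MIC_LEN;
	}

	return round_up_pow2(len, 4);
}

int main(void)
{
	size_t buf_len;
	size_t len = mgmt_tx_len(26, true, true, &buf_len);

	printf("alloc %zu bytes, buf_len %zu\n", len, buf_len); /* 60 and 34 */
	return 0;
}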
@@ -2323,9 +2323,9 @@
#define WMI_PDEV_PARAM_UNSUPPORTED 0
enum wmi_pdev_param {
-/* TX chian mask */
+/* TX chain mask */
WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
-/* RX chian mask */
+/* RX chain mask */
WMI_PDEV_PARAM_RX_CHAIN_MASK,
/* TX power limit for 2G Radio */
WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
@@ -4259,7 +4259,7 @@ void ath10k_wmi_detach(struct ath10k *ar);
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
-int ath10k_wmi_connect_htc_service(struct ath10k *ar);
+int ath10k_wmi_connect(struct ath10k *ar);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
const struct wmi_channel_arg *);
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
......
@@ -2230,14 +2230,6 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
clear_bit(ATH_OP_SCANNING, &common->op_flags);
}
-static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
-struct ieee80211_vif *vif,
-struct cfg80211_chan_def *chandef)
-{
-/* depend on vif->csa_active only */
-return;
-}
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -2285,5 +2277,4 @@ struct ieee80211_ops ath9k_ops = {
#endif
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
-.channel_switch_beacon = ath9k_channel_switch_beacon,
};