Commit 32a07e44 authored by Kalle Valo

ath6kl: create ath6kl_hif_stop()

This is needed to reset the HIF layer when powering down the hardware.
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
parent cd4b8b85
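
For context, a minimal sketch (not part of this commit) of how the core layer can use the new op together with the existing ath6kl_hif_power_off() when shutting the hardware down; ath6kl_stop_hw() is a hypothetical caller name used only for illustration:

static int ath6kl_stop_hw(struct ath6kl *ar)
{
	/* flush/cancel pending HIF bus requests before cutting power */
	ath6kl_hif_stop(ar);

	return ath6kl_hif_power_off(ar);
}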
@@ -111,4 +111,11 @@ static inline int ath6kl_hif_power_off(struct ath6kl *ar)
	return ar->hif_ops->power_off(ar);
}

static inline void ath6kl_hif_stop(struct ath6kl *ar)
{
	ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n");

	ar->hif_ops->stop(ar);
}

#endif
@@ -246,6 +246,7 @@ struct ath6kl_hif_ops {
	int (*resume)(struct ath6kl *ar);
	int (*power_on)(struct ath6kl *ar);
	int (*power_off)(struct ath6kl *ar);
	void (*stop)(struct ath6kl *ar);
};

int ath6kl_hif_setup(struct ath6kl_device *dev);
@@ -45,6 +45,8 @@ struct ath6kl_sdio {
	struct list_head scat_req;
	spinlock_t scat_lock;
	bool scatter_enabled;

	bool is_disabled;
	atomic_t irq_handling;
	const struct sdio_device_id *id;
@@ -651,6 +653,11 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
		list_del(&s_req->list);
		spin_unlock_bh(&ar_sdio->scat_lock);

		/*
		 * FIXME: should we also call completion handler with
		 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
		 * that the packet is properly freed?
		 */

		if (s_req->busrequest)
			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
		kfree(s_req->virt_dma_buf);
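
A hedged sketch of what the FIXME above hints at, reusing the scatter request's own completion callback the same way ath6kl_sdio_stop() further below does; ath6kl_sdio_cancel_scat_req() is a hypothetical helper, not part of this commit:

static void ath6kl_sdio_cancel_scat_req(struct ath6kl_sdio *ar_sdio,
					struct hif_scatter_req *s_req)
{
	/* complete the cancelled request so its packet can be freed */
	s_req->status = -ECANCELED;
	s_req->complete(ar_sdio->ar->htc_target, s_req);
}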
@@ -670,6 +677,11 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
	int ret;
	bool virt_scat = false;

	if (ar_sdio->scatter_enabled)
		return 0;

	ar_sdio->scatter_enabled = true;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
@@ -762,6 +774,38 @@ static int ath6kl_sdio_resume(struct ath6kl *ar)
	return 0;
}

static void ath6kl_sdio_stop(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *req, *tmp_req;
	void *context;

	/* FIXME: make sure that wq is not queued again */

	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);

		if (req->scat_req) {
			/* this is a scatter gather request */
			req->scat_req->status = -ECANCELED;
			req->scat_req->complete(ar_sdio->ar->htc_target,
						req->scat_req);
		} else {
			context = req->packet;
			ath6kl_sdio_free_bus_req(ar_sdio, req);
			ath6kl_hif_rw_comp_handler(context, -ECANCELED);
		}
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
}

static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.write_async = ath6kl_sdio_write_async,
@@ -776,6 +820,7 @@ static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.resume = ath6kl_sdio_resume,
	.power_on = ath6kl_sdio_power_on,
	.power_off = ath6kl_sdio_power_off,
	.stop = ath6kl_sdio_stop,
};

static int ath6kl_sdio_probe(struct sdio_func *func,
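
The .stop callback follows the same pattern as the other HIF ops: each bus backend registers its own implementation and the core only calls through ar->hif_ops (via ath6kl_hif_stop() above). As a hedged illustration of that design, a second backend would wire it up the same way; ath6kl_usb_stop() and ath6kl_usb_ops below are purely hypothetical names, not part of this commit:

static void ath6kl_usb_stop(struct ath6kl *ar)
{
	/* cancel this bus's pending I/O, mirroring ath6kl_sdio_stop() */
}

static const struct ath6kl_hif_ops ath6kl_usb_ops = {
	.stop = ath6kl_usb_stop,
	/* remaining ops omitted from this sketch */
};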