Commit 46e8f49e authored by Haijun Liu, committed by David S. Miller

net: wwan: t7xx: Introduce power management

Implements the suspend, resume, freeze, thaw, poweroff, and restore
`dev_pm_ops` callbacks.
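As a rough illustration of how six such callbacks are commonly wired into a
PCI driver's `dev_pm_ops` table (the handler names and the freeze/thaw/poweroff
mappings below are assumptions for this sketch; the actual table is in the
collapsed diff further down):

static int t7xx_pci_pm_suspend(struct device *dev);	/* illustrative name */
static int t7xx_pci_pm_resume(struct device *dev);	/* illustrative name */

static const struct dev_pm_ops t7xx_pci_pm_ops = {
	.suspend = t7xx_pci_pm_suspend,
	.resume = t7xx_pci_pm_resume,
	.freeze = t7xx_pci_pm_suspend,	/* assumed to share the suspend path */
	.thaw = t7xx_pci_pm_resume,
	.poweroff = t7xx_pci_pm_suspend,
	.restore = t7xx_pci_pm_resume,
};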

From the host's point of view, the t7xx driver is one entity, but the
device has several modules that need to be addressed in different ways
during power management (PM) flows.
The driver uses the term 'PM entities' to refer to the DPMAIF and the
two CLDMA HW blocks that need to be managed during PM flows.
When a dev_pm_ops function is called, the PM entities list is iterated
and the matching callback is invoked for each entry in the list.
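
As a hedged sketch of the suspend side of that iteration, built only from the
md_pm_entities list and struct md_pm_entity added by this patch (the function
name and error handling here are illustrative):

static int t7xx_pm_suspend_entities_sketch(struct t7xx_pci_dev *t7xx_dev)
{
	struct md_pm_entity *pm_entity;
	int ret;

	/* Walk the registered PM entities and invoke each suspend hook */
	list_for_each_entry(pm_entity, &t7xx_dev->md_pm_entities, entity) {
		if (!pm_entity->suspend)
			continue;
		ret = pm_entity->suspend(t7xx_dev, pm_entity->entity_param);
		if (ret)
			return ret;
	}
	return 0;
}
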
Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 05d19bf5
@@ -1076,6 +1076,120 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
return 0;
}
static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
struct cldma_ctrl *md_ctrl = entity_param;
struct t7xx_cldma_hw *hw_info;
unsigned long flags;
int qno_t;
hw_info = &md_ctrl->hw_info;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
t7xx_cldma_hw_restore(hw_info);
for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
MTK_TX);
t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
MTK_RX);
}
t7xx_cldma_enable_irq(md_ctrl);
t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
struct cldma_ctrl *md_ctrl = entity_param;
unsigned long flags;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
if (md_ctrl->hif_id == CLDMA_ID_MD)
t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);
return 0;
}
static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
struct cldma_ctrl *md_ctrl = entity_param;
struct t7xx_cldma_hw *hw_info;
unsigned long flags;
hw_info = &md_ctrl->hw_info;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
t7xx_cldma_clear_ip_busy(hw_info);
t7xx_cldma_disable_irq(md_ctrl);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
struct cldma_ctrl *md_ctrl = entity_param;
struct t7xx_cldma_hw *hw_info;
unsigned long flags;
if (md_ctrl->hif_id == CLDMA_ID_MD)
t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
hw_info = &md_ctrl->hw_info;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
md_ctrl->txq_started = 0;
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
return 0;
}
static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
{
md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL);
if (!md_ctrl->pm_entity)
return -ENOMEM;
md_ctrl->pm_entity->entity_param = md_ctrl;
if (md_ctrl->hif_id == CLDMA_ID_MD)
md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
else
md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;
md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
md_ctrl->pm_entity->resume = t7xx_cldma_resume;
md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;
return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
}
static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
{
if (!md_ctrl->pm_entity)
return -EINVAL;
t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
kfree(md_ctrl->pm_entity);
md_ctrl->pm_entity = NULL;
return 0;
}
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
{
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
@@ -1126,6 +1240,7 @@ static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
* t7xx_cldma_init() - Initialize CLDMA.
* @md_ctrl: CLDMA context structure.
*
* Allocate and initialize device power management entity.
* Initialize HIF TX/RX queue structure.
* Register CLDMA callback ISR with PCIe driver.
*
@@ -1136,12 +1251,16 @@ static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
{
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
int ret, i;
md_ctrl->txq_active = 0;
md_ctrl->rxq_active = 0;
md_ctrl->is_late_init = false;
ret = t7xx_cldma_pm_init(md_ctrl);
if (ret)
return ret;
spin_lock_init(&md_ctrl->cldma_lock);
for (i = 0; i < CLDMA_TXQ_NUM; i++) {
@@ -1176,6 +1295,7 @@ int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
err_workqueue:
t7xx_cldma_destroy_wqs(md_ctrl);
t7xx_cldma_pm_uninit(md_ctrl);
return -ENOMEM;
}
@@ -1190,4 +1310,5 @@ void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
t7xx_cldma_stop(md_ctrl);
t7xx_cldma_late_release(md_ctrl);
t7xx_cldma_destroy_wqs(md_ctrl);
t7xx_cldma_pm_uninit(md_ctrl);
}
@@ -98,6 +98,7 @@ struct cldma_ctrl {
struct dma_pool *gpd_dmapool;
struct cldma_ring tx_ring[CLDMA_TXQ_NUM];
struct cldma_ring rx_ring[CLDMA_RXQ_NUM];
struct md_pm_entity *pm_entity;
struct t7xx_cldma_hw hw_info;
bool is_late_init;
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
@@ -398,6 +398,90 @@ static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl)
return 0;
}
static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param)
{
struct dpmaif_ctrl *dpmaif_ctrl = param;
t7xx_dpmaif_tx_stop(dpmaif_ctrl);
t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
t7xx_dpmaif_disable_irq(dpmaif_ctrl);
t7xx_dpmaif_rx_stop(dpmaif_ctrl);
return 0;
}
static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl)
{
int qno;
for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++)
t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno);
}
static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_rx_queue *rxq;
struct dpmaif_tx_queue *txq;
unsigned int que_cnt;
for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) {
txq = &dpmaif_ctrl->txq[que_cnt];
txq->que_started = true;
}
for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) {
rxq = &dpmaif_ctrl->rxq[que_cnt];
rxq->que_started = true;
}
}
static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param)
{
struct dpmaif_ctrl *dpmaif_ctrl = param;
if (!dpmaif_ctrl)
return 0;
t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl);
t7xx_dpmaif_enable_irq(dpmaif_ctrl);
t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl);
t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info);
wake_up(&dpmaif_ctrl->tx_wq);
return 0;
}
static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
int ret;
INIT_LIST_HEAD(&dpmaif_pm_entity->entity);
dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend;
dpmaif_pm_entity->suspend_late = NULL;
dpmaif_pm_entity->resume_early = NULL;
dpmaif_pm_entity->resume = &t7xx_dpmaif_resume;
dpmaif_pm_entity->id = PM_ENTITY_ID_DATA;
dpmaif_pm_entity->entity_param = dpmaif_ctrl;
ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
if (ret)
dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
return ret;
}
static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
int ret;
ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
if (ret < 0)
dev_err(dpmaif_ctrl->dev, "dpmaif pm_entity unregister fail\n");
return ret;
}
int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state)
{
int ret = 0;
@@ -461,11 +545,16 @@ struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev,
dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base -
t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl);
if (ret)
return NULL;
t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl);
t7xx_dpmaif_disable_irq(dpmaif_ctrl);
ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl);
if (ret) {
t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret);
return NULL;
}
@@ -478,6 +567,7 @@ void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl)
{
if (dpmaif_ctrl->dpmaif_sw_init_done) {
t7xx_dpmaif_stop(dpmaif_ctrl);
t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
t7xx_dpmaif_sw_release(dpmaif_ctrl);
dpmaif_ctrl->dpmaif_sw_init_done = false;
}
@@ -174,6 +174,7 @@ struct dpmaif_callbacks {
struct dpmaif_ctrl {
struct device *dev;
struct t7xx_pci_dev *t7xx_dev;
struct md_pm_entity dpmaif_pm_entity;
enum dpmaif_state state;
bool dpmaif_sw_init_done;
struct dpmaif_hw_info hw_info;
@@ -24,6 +24,11 @@
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
#define D2H_INT_SR_ACK (D2H_INT_SUSPEND_ACK | \
D2H_INT_RESUME_ACK | \
D2H_INT_SUSPEND_ACK_AP | \
D2H_INT_RESUME_ACK_AP)
static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask)
{
void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base;
@@ -53,6 +58,18 @@ static irqreturn_t t7xx_mhccif_isr_thread(int irq, void *data)
}
t7xx_mhccif_clear_interrupts(t7xx_dev, int_status);
if (int_status & D2H_INT_SR_ACK)
complete(&t7xx_dev->pm_sr_ack);
iowrite32(T7XX_L1_BIT(1), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev);
if (!int_status) {
val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1);
iowrite32(val, IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
}
t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
return IRQ_HANDLED;
}
This diff is collapsed.
@@ -17,7 +17,9 @@
#ifndef __T7XX_PCI_H__
#define __T7XX_PCI_H__
#include <linux/completion.h>
#include <linux/irqreturn.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/types.h>
@@ -49,6 +51,10 @@ typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);
* @md: modem interface
* @ccmni_ctlb: context structure used to control the network data path
* @rgu_pci_irq_en: RGU callback ISR registered and active
* @md_pm_entities: list of pm entities
* @md_pm_entity_mtx: protects md_pm_entities list
* @pm_sr_ack: ack from the device after it goes to sleep or wakes up
* @md_pm_state: state for resume/suspend
*/
struct t7xx_pci_dev {
t7xx_intr_callback intr_handler[EXT_INT_NUM];
@@ -59,6 +65,46 @@ struct t7xx_pci_dev {
struct t7xx_modem *md;
struct t7xx_ccmni_ctrl *ccmni_ctlb;
bool rgu_pci_irq_en;
/* Low Power Items */
struct list_head md_pm_entities;
struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */
struct completion pm_sr_ack;
atomic_t md_pm_state;
};
enum t7xx_pm_id {
PM_ENTITY_ID_CTRL1,
PM_ENTITY_ID_CTRL2,
PM_ENTITY_ID_DATA,
PM_ENTITY_ID_INVALID
};
/* struct md_pm_entity - device power management entity
* @entity: list of PM Entities
* @suspend: callback invoked before sending D3 request to device
* @suspend_late: callback invoked after getting D3 ACK from device
* @resume_early: callback invoked before sending the resume request to device
* @resume: callback invoked after getting resume ACK from device
* @id: unique PM entity identifier
* @entity_param: parameter passed to the registered callbacks
*
* This structure is used to indicate PM operations required by internal
* HW modules such as CLDMA and DPMAIF.
*/
struct md_pm_entity {
struct list_head entity;
int (*suspend)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
void (*suspend_late)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
void (*resume_early)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
int (*resume)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
enum t7xx_pm_id id;
void *entity_param;
};
int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev);
#endif /* __T7XX_PCI_H__ */
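
Taken together, the declarations above imply two small building blocks on the
implementation side; both of the following are hedged sketches, not the bodies
from the collapsed diff. Registration is presumably a mutex-guarded list
insert, and the suspend/resume handshake waits on pm_sr_ack, which the MHCCIF
ISR completes on D2H_INT_SR_ACK (see earlier in this patch):

int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev,
				struct md_pm_entity *pm_entity)
{
	/* Sketch: serialize list mutation with md_pm_entity_mtx */
	mutex_lock(&t7xx_dev->md_pm_entity_mtx);
	list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
	mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
	return 0;
}

/* Sketch: block until the device ACKs a sleep/wake request. The 1.5 s
 * timeout is an assumption for illustration, not the driver's value. */
static int t7xx_wait_pm_sr_ack_sketch(struct t7xx_pci_dev *t7xx_dev)
{
	if (!wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
					 msecs_to_jiffies(1500)))
		return -ETIMEDOUT;
	return 0;
}

Per the struct md_pm_entity documentation, a suspend flow would run each
entity's suspend hook, send the device the D3 request, wait for the ACK as
sketched above, and only then run the suspend_late hooks; resume mirrors this
with resume_early before the resume request and resume after the ACK.
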
@@ -188,6 +188,7 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm
case EXCEPTION_EVENT:
dev_err(dev, "Exception event\n");
t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
t7xx_md_exception_handshake(ctl->md);
fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
@@ -300,6 +301,7 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
return -ETIMEDOUT;
}
t7xx_pci_pm_init_late(md->t7xx_dev);
fsm_routine_ready(ctl);
return 0;
}