Commit 39d43904 authored by Haijun Liu, committed by David S. Miller

net: wwan: t7xx: Add control DMA interface

The Cross Layer DMA (CLDMA) hardware interface (HIF) enables the control
path of host-modem data transfers. The CLDMA HIF layer provides a common
interface to the port layer.

CLDMA manages 8 independent RX/TX physical channels with data flow
control in HW queues. CLDMA uses ring buffers of General Packet
Descriptors (GPD) for TX/RX; a GPD can represent one or more data
buffers (DB).

CLDMA HIF initializes GPD rings, registers ISR handlers for CLDMA
interrupts, and initializes CLDMA HW registers.
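
For orientation, here is a minimal sketch of the bring-up order implied by
the API this patch exports (error paths trimmed). How the caller retrieves
md_ctrl after t7xx_cldma_alloc() is driver-internal, so t7xx_get_md_ctrl()
below is a hypothetical accessor, not part of the patch:

static int example_cldma_bringup(struct t7xx_pci_dev *t7xx_dev)
{
	struct cldma_ctrl *md_ctrl;
	int ret;

	ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
	if (ret)
		return ret;

	md_ctrl = t7xx_get_md_ctrl(t7xx_dev, CLDMA_ID_MD); /* hypothetical */

	ret = t7xx_cldma_init(md_ctrl);		/* GPD rings, ISR, workqueues */
	if (ret)
		return ret;

	t7xx_cldma_hif_hw_init(md_ctrl);	/* Program the CLDMA HW registers */
	t7xx_cldma_start(md_ctrl);		/* Enable queues and interrupts */
	return 0;
}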

CLDMA TX flow:
1. Port Layer write
2. Get DB address
3. Configure GPD
4. Trigger processing via a HW register write

CLDMA RX flow:
1. CLDMA HW sends an RX "done" interrupt to the host
2. The driver starts a thread to safely read the GPD
3. The DB is sent to the port layer
4. A new buffer is allocated for the GPD ring
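
At the GPD level, the TX steps 2-4 reduce to filling a descriptor and
kicking the HW. The following is a simplified sketch using the GPD layout
and helpers added by this patch; the real t7xx_cldma_send_skb() additionally
handles locking, ring wrap, flow control and the DMA write barrier before
handing ownership to HW:

static void example_tx_one_gpd(struct cldma_ctrl *md_ctrl, unsigned int qno,
			       struct cldma_gpd *gpd, dma_addr_t db_addr,
			       u16 len)
{
	/* Steps 2-3: point the GPD at the mapped data buffer (DB) */
	gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(db_addr));
	gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(db_addr));
	gpd->data_buff_len = cpu_to_le16(len);
	/* Request a completion interrupt and hand the GPD to HW */
	gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;

	/* Step 4: trigger HW processing via a register write */
	t7xx_cldma_hw_start_queue(&md_ctrl->hw_info, qno, MTK_TX);
}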

Note: This patch does not enable compilation since it has dependencies
such as t7xx_pcie_mac_clear_int()/t7xx_pcie_mac_set_int() and
struct t7xx_pci_dev which are added by the core patch.
Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <haijun.liu@mediatek.com>
* Moises Veleta <moises.veleta@intel.com>
* Ricardo Martinez <ricardo.martinez@linux.intel.com>
*
* Contributors:
* Amir Hanania <amir.hanania@intel.com>
* Andy Shevchenko <andriy.shevchenko@linux.intel.com>
* Eliot Lee <eliot.lee@intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
*/
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/types.h>
#include "t7xx_cldma.h"
#define ADDR_SIZE 8
void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info)
{
u32 val;
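/* Set the wakeup bit to clear the CLDMA IP busy status */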
val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
val |= IP_BUSY_WAKEUP;
iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
}
/**
* t7xx_cldma_hw_restore() - Restore CLDMA HW registers.
* @hw_info: Pointer to struct t7xx_cldma_hw.
*
* Restore HW after resume. Writes uplink configuration for CLDMA HW.
*/
void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info)
{
u32 ul_cfg;
ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
if (hw_info->hw_mode == MODE_BIT_64)
ul_cfg |= UL_CFG_BIT_MODE_64;
else if (hw_info->hw_mode == MODE_BIT_40)
ul_cfg |= UL_CFG_BIT_MODE_40;
else if (hw_info->hw_mode == MODE_BIT_36)
ul_cfg |= UL_CFG_BIT_MODE_36;
iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
/* Disable TX and RX invalid address check */
iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
}
void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 val;
reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD :
hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD;
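/* CLDMA_ALL_Q doubles as the bitmask selecting all eight queues */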
val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
iowrite32(val, reg);
}
void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info)
{
/* Enable the TX & RX interrupts */
iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
/* Enable the empty queue interrupt */
iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
}
void t7xx_cldma_hw_reset(void __iomem *ao_base)
{
u32 val;
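/* Assert the PMIC and CLDMA1 software resets, hold briefly, then release */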
val = ioread32(ao_base + REG_INFRA_RST2_SET);
val |= RST2_PMIC_SW_RST_SET;
iowrite32(val, ao_base + REG_INFRA_RST2_SET);
val = ioread32(ao_base + REG_INFRA_RST4_SET);
val |= RST4_CLDMA1_SW_RST_SET;
iowrite32(val, ao_base + REG_INFRA_RST4_SET);
udelay(1);
val = ioread32(ao_base + REG_INFRA_RST4_CLR);
val |= RST4_CLDMA1_SW_RST_CLR;
iowrite32(val, ao_base + REG_INFRA_RST4_CLR);
val = ioread32(ao_base + REG_INFRA_RST2_CLR);
val |= RST2_PMIC_SW_RST_CLR;
iowrite32(val, ao_base + REG_INFRA_RST2_CLR);
}
bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
{
u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
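/* A non-zero start address means the queue has already been configured */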
return ioread64(hw_info->ap_pdn_base + offset);
}
void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
enum mtk_txrx tx_rx)
{
u32 offset = qno * ADDR_SIZE;
void __iomem *reg;
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
iowrite64(address, reg + offset);
}
void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx)
{
void __iomem *base = hw_info->ap_pdn_base;
if (tx_rx == MTK_RX)
iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD);
else
iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD);
}
unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 mask, val;
mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_STATUS :
hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS;
val = ioread32(reg);
return val & mask;
}
void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
{
unsigned int ch_id;
ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
ch_id &= bitmask;
/* Clear the ch IDs in the TX interrupt status register */
iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
}
void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
{
unsigned int ch_id;
ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
ch_id &= bitmask;
/* Clear the ch IDs in the RX interrupt status register */
iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
}
unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 val;
reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 :
hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0;
val = ioread32(reg);
return val & bitmask;
}
void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 val;
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
iowrite32(val, reg);
}
void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 val;
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
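/* Empty queue interrupt bits sit EQ_STA_BIT_OFFSET above the TX/RX done bits */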
iowrite32(val << EQ_STA_BIT_OFFSET, reg);
}
void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 val;
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
iowrite32(val, reg);
}
void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 val;
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
iowrite32(val << EQ_STA_BIT_OFFSET, reg);
}
/**
* t7xx_cldma_hw_init() - Initialize CLDMA HW.
* @hw_info: Pointer to struct t7xx_cldma_hw.
*
* Write uplink and downlink configuration to CLDMA HW.
*/
void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info)
{
u32 ul_cfg, dl_cfg;
ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
/* Configure the DRAM address mode */
ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
dl_cfg &= ~DL_CFG_BIT_MODE_MASK;
if (hw_info->hw_mode == MODE_BIT_64) {
ul_cfg |= UL_CFG_BIT_MODE_64;
dl_cfg |= DL_CFG_BIT_MODE_64;
} else if (hw_info->hw_mode == MODE_BIT_40) {
ul_cfg |= UL_CFG_BIT_MODE_40;
dl_cfg |= DL_CFG_BIT_MODE_40;
} else if (hw_info->hw_mode == MODE_BIT_36) {
ul_cfg |= UL_CFG_BIT_MODE_36;
dl_cfg |= DL_CFG_BIT_MODE_36;
}
iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
dl_cfg |= DL_CFG_UP_HW_LAST;
iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK);
iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK);
iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
}
void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
{
void __iomem *reg;
reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD :
hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD;
iowrite32(CLDMA_ALL_Q, reg);
}
void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
{
void __iomem *reg;
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
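/* Mask both the TX/RX done and the empty queue interrupts */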
iowrite32(TXRX_STATUS_BITMASK, reg);
iowrite32(EMPTY_STATUS_BITMASK, reg);
}
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <haijun.liu@mediatek.com>
* Moises Veleta <moises.veleta@intel.com>
* Ricardo Martinez <ricardo.martinez@linux.intel.com>
*
* Contributors:
* Amir Hanania <amir.hanania@intel.com>
* Andy Shevchenko <andriy.shevchenko@linux.intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
*/
#ifndef __T7XX_CLDMA_H__
#define __T7XX_CLDMA_H__
#include <linux/bits.h>
#include <linux/types.h>
#define CLDMA_TXQ_NUM 8
#define CLDMA_RXQ_NUM 8
#define CLDMA_ALL_Q GENMASK(7, 0)
/* Interrupt status bits */
#define EMPTY_STATUS_BITMASK GENMASK(15, 8)
#define TXRX_STATUS_BITMASK GENMASK(7, 0)
#define EQ_STA_BIT_OFFSET 8
#define L2_INT_BIT_COUNT 16
#define EQ_STA_BIT(index) (BIT((index) + EQ_STA_BIT_OFFSET) & EMPTY_STATUS_BITMASK)
#define TQ_ERR_INT_BITMASK GENMASK(23, 16)
#define TQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24)
#define RQ_ERR_INT_BITMASK GENMASK(23, 16)
#define RQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24)
#define CLDMA0_AO_BASE 0x10049000
#define CLDMA0_PD_BASE 0x1021d000
#define CLDMA1_AO_BASE 0x1004b000
#define CLDMA1_PD_BASE 0x1021f000
#define CLDMA_R_AO_BASE 0x10023000
#define CLDMA_R_PD_BASE 0x1023d000
/* CLDMA TX */
#define REG_CLDMA_UL_START_ADDRL_0 0x0004
#define REG_CLDMA_UL_START_ADDRH_0 0x0008
#define REG_CLDMA_UL_CURRENT_ADDRL_0 0x0044
#define REG_CLDMA_UL_CURRENT_ADDRH_0 0x0048
#define REG_CLDMA_UL_STATUS 0x0084
#define REG_CLDMA_UL_START_CMD 0x0088
#define REG_CLDMA_UL_RESUME_CMD 0x008c
#define REG_CLDMA_UL_STOP_CMD 0x0090
#define REG_CLDMA_UL_ERROR 0x0094
#define REG_CLDMA_UL_CFG 0x0098
#define UL_CFG_BIT_MODE_36 BIT(5)
#define UL_CFG_BIT_MODE_40 BIT(6)
#define UL_CFG_BIT_MODE_64 BIT(7)
#define UL_CFG_BIT_MODE_MASK GENMASK(7, 5)
#define REG_CLDMA_UL_MEM 0x009c
#define UL_MEM_CHECK_DIS BIT(0)
/* CLDMA RX */
#define REG_CLDMA_DL_START_CMD 0x05bc
#define REG_CLDMA_DL_RESUME_CMD 0x05c0
#define REG_CLDMA_DL_STOP_CMD 0x05c4
#define REG_CLDMA_DL_MEM 0x0508
#define DL_MEM_CHECK_DIS BIT(0)
#define REG_CLDMA_DL_CFG 0x0404
#define DL_CFG_UP_HW_LAST BIT(2)
#define DL_CFG_BIT_MODE_36 BIT(10)
#define DL_CFG_BIT_MODE_40 BIT(11)
#define DL_CFG_BIT_MODE_64 BIT(12)
#define DL_CFG_BIT_MODE_MASK GENMASK(12, 10)
#define REG_CLDMA_DL_START_ADDRL_0 0x0478
#define REG_CLDMA_DL_START_ADDRH_0 0x047c
#define REG_CLDMA_DL_CURRENT_ADDRL_0 0x04b8
#define REG_CLDMA_DL_CURRENT_ADDRH_0 0x04bc
#define REG_CLDMA_DL_STATUS 0x04f8
/* CLDMA MISC */
#define REG_CLDMA_L2TISAR0 0x0810
#define REG_CLDMA_L2TISAR1 0x0814
#define REG_CLDMA_L2TIMR0 0x0818
#define REG_CLDMA_L2TIMR1 0x081c
#define REG_CLDMA_L2TIMCR0 0x0820
#define REG_CLDMA_L2TIMCR1 0x0824
#define REG_CLDMA_L2TIMSR0 0x0828
#define REG_CLDMA_L2TIMSR1 0x082c
#define REG_CLDMA_L3TISAR0 0x0830
#define REG_CLDMA_L3TISAR1 0x0834
#define REG_CLDMA_L2RISAR0 0x0850
#define REG_CLDMA_L2RISAR1 0x0854
#define REG_CLDMA_L3RISAR0 0x0870
#define REG_CLDMA_L3RISAR1 0x0874
#define REG_CLDMA_IP_BUSY 0x08b4
#define IP_BUSY_WAKEUP BIT(0)
#define CLDMA_L2TISAR0_ALL_INT_MASK GENMASK(15, 0)
#define CLDMA_L2RISAR0_ALL_INT_MASK GENMASK(15, 0)
/* CLDMA MISC */
#define REG_CLDMA_L2RIMR0 0x0858
#define REG_CLDMA_L2RIMR1 0x085c
#define REG_CLDMA_L2RIMCR0 0x0860
#define REG_CLDMA_L2RIMCR1 0x0864
#define REG_CLDMA_L2RIMSR0 0x0868
#define REG_CLDMA_L2RIMSR1 0x086c
#define REG_CLDMA_BUSY_MASK 0x0954
#define BUSY_MASK_PCIE BIT(0)
#define BUSY_MASK_AP BIT(1)
#define BUSY_MASK_MD BIT(2)
#define REG_CLDMA_INT_MASK 0x0960
/* CLDMA RESET */
#define REG_INFRA_RST4_SET 0x0730
#define RST4_CLDMA1_SW_RST_SET BIT(20)
#define REG_INFRA_RST4_CLR 0x0734
#define RST4_CLDMA1_SW_RST_CLR BIT(20)
#define REG_INFRA_RST2_SET 0x0140
#define RST2_PMIC_SW_RST_SET BIT(18)
#define REG_INFRA_RST2_CLR 0x0144
#define RST2_PMIC_SW_RST_CLR BIT(18)
enum mtk_txrx {
MTK_TX,
MTK_RX,
};
enum t7xx_hw_mode {
MODE_BIT_32,
MODE_BIT_36,
MODE_BIT_40,
MODE_BIT_64,
};
struct t7xx_cldma_hw {
enum t7xx_hw_mode hw_mode;
void __iomem *ap_ao_base;
void __iomem *ap_pdn_base;
u32 phy_interrupt_id;
};
void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx);
void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx);
void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx);
void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx);
unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx);
void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info);
void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx);
void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info);
void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx);
void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask);
void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask);
void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx);
void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info,
unsigned int qno, u64 address, enum mtk_txrx tx_rx);
void t7xx_cldma_hw_reset(void __iomem *ao_base);
void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx);
unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
enum mtk_txrx tx_rx);
void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info);
void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info);
bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno);
#endif
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <haijun.liu@mediatek.com>
* Moises Veleta <moises.veleta@intel.com>
* Ricardo Martinez <ricardo.martinez@linux.intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
*
* Contributors:
* Amir Hanania <amir.hanania@intel.com>
* Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
* Eliot Lee <eliot.lee@intel.com>
*/
#ifndef __T7XX_HIF_CLDMA_H__
#define __T7XX_HIF_CLDMA_H__
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/types.h>
#include "t7xx_cldma.h"
#include "t7xx_pci.h"
/**
* enum cldma_id - Identifiers for CLDMA HW units.
* @CLDMA_ID_MD: Modem control channel.
* @CLDMA_ID_AP: Application Processor control channel (not used at the moment).
* @CLDMA_NUM: Number of CLDMA HW units available.
*/
enum cldma_id {
CLDMA_ID_MD,
CLDMA_ID_AP,
CLDMA_NUM
};
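/*
 * GPDs are chained into a ring: next_gpd_ptr_{h,l} hold the DMA address of
 * the next descriptor, and the HWO flag marks a descriptor as HW-owned.
 */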
struct cldma_gpd {
u8 flags;
u8 not_used1;
__le16 rx_data_allow_len;
__le32 next_gpd_ptr_h;
__le32 next_gpd_ptr_l;
__le32 data_buff_bd_ptr_h;
__le32 data_buff_bd_ptr_l;
__le16 data_buff_len;
__le16 not_used2;
};
struct cldma_request {
struct cldma_gpd *gpd; /* Virtual address for CPU */
dma_addr_t gpd_addr; /* Physical address for DMA */
struct sk_buff *skb;
dma_addr_t mapped_buff;
struct list_head entry;
};
struct cldma_ring {
struct list_head gpd_ring; /* Ring of struct cldma_request */
unsigned int length; /* Number of struct cldma_request */
int pkt_size;
};
struct cldma_queue {
struct cldma_ctrl *md_ctrl;
enum mtk_txrx dir;
unsigned int index;
struct cldma_ring *tr_ring;
struct cldma_request *tr_done;
struct cldma_request *rx_refill;
struct cldma_request *tx_next;
int budget; /* Same as ring buffer size by default */
spinlock_t ring_lock;
wait_queue_head_t req_wq; /* Only for TX */
struct workqueue_struct *worker;
struct work_struct cldma_work;
};
struct cldma_ctrl {
enum cldma_id hif_id;
struct device *dev;
struct t7xx_pci_dev *t7xx_dev;
struct cldma_queue txq[CLDMA_TXQ_NUM];
struct cldma_queue rxq[CLDMA_RXQ_NUM];
unsigned short txq_active;
unsigned short rxq_active;
unsigned short txq_started;
spinlock_t cldma_lock; /* Protects CLDMA structure */
/* Assumes T/R GPD/BD/SPD have the same size */
struct dma_pool *gpd_dmapool;
struct cldma_ring tx_ring[CLDMA_TXQ_NUM];
struct cldma_ring rx_ring[CLDMA_RXQ_NUM];
struct t7xx_cldma_hw hw_info;
bool is_late_init;
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};
#define GPD_FLAGS_HWO BIT(0)
#define GPD_FLAGS_IOC BIT(7)
#define GPD_DMAPOOL_ALIGN 16
#define CLDMA_MTU 3584 /* 3.5kB */
int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
#endif /* __T7XX_HIF_CLDMA_H__ */
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <haijun.liu@mediatek.com>
* Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
*
* Contributors:
* Amir Hanania <amir.hanania@intel.com>
* Andy Shevchenko <andriy.shevchenko@linux.intel.com>
* Eliot Lee <eliot.lee@intel.com>
* Moises Veleta <moises.veleta@intel.com>
* Ricardo Martinez <ricardo.martinez@linux.intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
*/
#ifndef __T7XX_REG_H__
#define __T7XX_REG_H__
enum t7xx_int {
DPMAIF_INT,
CLDMA0_INT,
CLDMA1_INT,
CLDMA2_INT,
MHCCIF_INT,
DPMAIF2_INT,
SAP_RGU_INT,
CLDMA3_INT,
};
#endif /* __T7XX_REG_H__ */