Commit d27553c1 authored by Jinjian Song, committed by David S. Miller

net: wwan: t7xx: Infrastructure for early port configuration

To support cases such as firmware update or core dump, the t7xx
device is capable of signaling the host that a special port
needs to be created before the handshake phase.

Add the infrastructure required to create the early ports, which
also requires a different configuration of the CLDMA queues.

Based on the v5 version of the following series:
'net: wwan: t7xx: fw flashing & coredump support'
(https://patchwork.kernel.org/project/netdevbpf/patch/3777bb382f4b0395cb594a602c5c79dbab86c9e0.1674307425.git.m.chetan.kumar@linux.intel.com/)
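
At a high level, when the device boots into the LK (bootloader) stage and
requests a port, the FSM reconfigures the AP CLDMA for the dedicated dump
queue before restarting it. A condensed sketch of that flow, using only
helpers introduced by this patch (error handling omitted):

	/* Device reported LK stage with a port-creation event */
	md_ctrl = md->md_ctrl[CLDMA_ID_AP];
	t7xx_cldma_hif_hw_init(md_ctrl);
	t7xx_cldma_stop(md_ctrl);
	t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG);
	t7xx_cldma_start(md_ctrl);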
Signed-off-by: Jinjian Song <jinjian.song@fibocom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 409c38d4
@@ -57,8 +57,6 @@
 #define CHECK_Q_STOP_TIMEOUT_US	1000000
 #define CHECK_Q_STOP_STEP_US	10000
 
-#define CLDMA_JUMBO_BUFF_SZ	(63 * 1024 + sizeof(struct ccci_header))
-
 static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
 				     enum mtk_txrx tx_rx, unsigned int index)
 {
@@ -161,7 +159,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
 		skb_reset_tail_pointer(skb);
 		skb_put(skb, le16_to_cpu(gpd->data_buff_len));
 
-		ret = md_ctrl->recv_skb(queue, skb);
+		ret = queue->recv_skb(queue, skb);
 		/* Break processing, will try again later */
 		if (ret < 0)
 			return ret;
@@ -897,13 +895,13 @@ static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
 /**
  * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
- * @md_ctrl: CLDMA context structure.
+ * @queue: CLDMA queue.
  * @recv_skb: Receiving skb callback.
  */
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
 			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
 {
-	md_ctrl->recv_skb = recv_skb;
+	queue->recv_skb = recv_skb;
 }
 
 /**
@@ -993,6 +991,28 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb
 	return ret;
 }
 
+static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
+{
+	int qno;
+
+	for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) {
+		md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+		t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb);
+	}
+
+	md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ;
+
+	for (qno = 0; qno < CLDMA_TXQ_NUM; qno++)
+		md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+
+	if (cfg_id == CLDMA_DEDICATED_Q_CFG) {
+		md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+		md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+		t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
+					t7xx_port_proxy_recv_skb_from_dedicated_queue);
+	}
+}
+
 static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
 {
 	char dma_pool_name[32];
@@ -1018,16 +1038,9 @@ static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
 			dev_err(md_ctrl->dev, "control TX ring init fail\n");
 			goto err_free_tx_ring;
 		}
-
-		md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
 	}
 
 	for (j = 0; j < CLDMA_RXQ_NUM; j++) {
-		md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;
-
-		if (j == CLDMA_RXQ_NUM - 1)
-			md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;
-
 		ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
 		if (ret) {
 			dev_err(md_ctrl->dev, "Control RX ring init fail\n");
@@ -1094,6 +1107,7 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
 {
 	struct device *dev = &t7xx_dev->pdev->dev;
 	struct cldma_ctrl *md_ctrl;
+	int qno;
 
 	md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
 	if (!md_ctrl)
@@ -1102,7 +1116,9 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
 	md_ctrl->t7xx_dev = t7xx_dev;
 	md_ctrl->dev = dev;
 	md_ctrl->hif_id = hif_id;
-	md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
+	for (qno = 0; qno < CLDMA_RXQ_NUM; qno++)
+		md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb;
+
 	t7xx_hw_info_init(md_ctrl);
 	t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
 	return 0;
@@ -1332,9 +1348,10 @@ int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
 	return -ENOMEM;
 }
 
-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
 {
 	t7xx_cldma_late_release(md_ctrl);
+	t7xx_cldma_adjust_config(md_ctrl, cfg_id);
 	t7xx_cldma_late_init(md_ctrl);
 }
...
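
The RX callback now lives on each CLDMA queue rather than on the controller,
so the shared queues and the dedicated dump queue can use different handlers
on the same HW unit. A minimal registration sketch against the new signature
(my_recv_skb is a hypothetical handler, not part of this patch):

	static int my_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
	{
		/* Consume the skb; a negative return asks CLDMA to retry later */
		dev_kfree_skb_any(skb);
		return 0;
	}

	t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], my_recv_skb);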
@@ -31,6 +31,10 @@
 #include "t7xx_cldma.h"
 #include "t7xx_pci.h"
 
+#define CLDMA_JUMBO_BUFF_SZ		(63 * 1024 + sizeof(struct ccci_header))
+#define CLDMA_SHARED_Q_BUFF_SZ		3584
+#define CLDMA_DEDICATED_Q_BUFF_SZ	2048
+
 /**
  * enum cldma_id - Identifiers for CLDMA HW units.
  * @CLDMA_ID_MD: Modem control channel.
@@ -55,6 +59,11 @@ struct cldma_gpd {
 	__le16 not_used2;
 };
 
+enum cldma_cfg {
+	CLDMA_SHARED_Q_CFG,
+	CLDMA_DEDICATED_Q_CFG,
+};
+
 struct cldma_request {
 	struct cldma_gpd *gpd;	/* Virtual address for CPU */
 	dma_addr_t gpd_addr;	/* Physical address for DMA */
@@ -82,6 +91,7 @@ struct cldma_queue {
 	wait_queue_head_t req_wq;	/* Only for TX */
 	struct workqueue_struct *worker;
 	struct work_struct cldma_work;
+	int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
 };
 
 struct cldma_ctrl {
@@ -101,24 +111,22 @@ struct cldma_ctrl {
 	struct md_pm_entity *pm_entity;
 	struct t7xx_cldma_hw hw_info;
 	bool is_late_init;
-	int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
 };
 
+#define CLDMA_Q_IDX_DUMP	1
 #define GPD_FLAGS_HWO		BIT(0)
 #define GPD_FLAGS_IOC		BIT(7)
 #define GPD_DMAPOOL_ALIGN	16
 
-#define CLDMA_MTU		3584	/* 3.5kB */
-
 int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
 void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
 int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
 void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id);
 void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
 int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
 void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
 			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
 int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
 void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
...
@@ -535,7 +535,7 @@ static void t7xx_md_hk_wq(struct work_struct *work)
 	/* Clear the HS2 EXIT event appended in core_reset() */
 	t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
-	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
+	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG);
 	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
 	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
 	md->core_md.handshake_ongoing = true;
@@ -550,7 +550,7 @@ static void t7xx_ap_hk_wq(struct work_struct *work)
 	/* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
 	t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
 	t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
-	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
+	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG);
 	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
 	md->core_ap.handshake_ongoing = true;
 	t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
@@ -764,6 +764,7 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
 
 void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
 {
+	enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
 	struct t7xx_modem *md = t7xx_dev->md;
 
 	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
@@ -771,7 +772,8 @@ void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
 	if (!md->md_init_finish)
 		return;
 
-	t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+	if (mode != T7XX_RESET && mode != T7XX_UNKNOWN)
+		t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
 	t7xx_port_proxy_uninit(md->port_prox);
 	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
 	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
...
@@ -183,7 +183,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
 	pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
 	pm_runtime_use_autosuspend(&pdev->dev);
 
-	return t7xx_wait_pm_config(t7xx_dev);
+	return 0;
 }
 
 void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
...
@@ -75,6 +75,8 @@ enum port_ch {
 	PORT_CH_DSS6_TX = 0x20df,
 	PORT_CH_DSS7_RX = 0x20e0,
 	PORT_CH_DSS7_TX = 0x20e1,
+
+	PORT_CH_UNIMPORTANT = 0xffff,
 };
 
 struct t7xx_port;
@@ -135,11 +137,13 @@ struct t7xx_port {
 	};
 };
 
+int t7xx_get_port_mtu(struct t7xx_port *port);
 struct sk_buff *t7xx_port_alloc_skb(int payload);
 struct sk_buff *t7xx_ctrl_alloc_skb(int payload);
 int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb);
 int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
 		       unsigned int ex_msg);
+int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb);
 int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
 			   unsigned int ex_msg);
...
@@ -48,6 +48,9 @@
 		     i < (proxy)->port_count;	\
 		     i++, (p) = &(proxy)->ports[i])
 
+#define T7XX_MAX_POSSIBLE_PORTS_NUM \
+	(max(ARRAY_SIZE(t7xx_port_conf), ARRAY_SIZE(t7xx_early_port_conf)))
+
 static const struct t7xx_port_conf t7xx_port_conf[] = {
 	{
 		.tx_ch = PORT_CH_UART2_TX,
@@ -100,6 +103,18 @@ static const struct t7xx_port_conf t7xx_port_conf[] = {
 	},
 };
 
+static const struct t7xx_port_conf t7xx_early_port_conf[] = {
+	{
+		.tx_ch = PORT_CH_UNIMPORTANT,
+		.rx_ch = PORT_CH_UNIMPORTANT,
+		.txq_index = CLDMA_Q_IDX_DUMP,
+		.rxq_index = CLDMA_Q_IDX_DUMP,
+		.txq_exp_index = CLDMA_Q_IDX_DUMP,
+		.rxq_exp_index = CLDMA_Q_IDX_DUMP,
+		.path_id = CLDMA_ID_AP,
+	},
+};
+
 static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
 {
 	const struct t7xx_port_conf *port_conf;
@@ -214,7 +229,17 @@ int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
 	return 0;
 }
 
-static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
+int t7xx_get_port_mtu(struct t7xx_port *port)
+{
+	enum cldma_id path_id = port->port_conf->path_id;
+	int tx_qno = t7xx_port_get_queue_no(port);
+	struct cldma_ctrl *md_ctrl;
+
+	md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
+	return md_ctrl->tx_ring[tx_qno].pkt_size;
+}
+
+int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
 {
 	enum cldma_id path_id = port->port_conf->path_id;
 	struct cldma_ctrl *md_ctrl;
@@ -329,6 +354,39 @@ static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
 	}
 }
 
+/**
+ * t7xx_port_proxy_recv_skb_from_dedicated_queue() - Dispatch early port received skb.
+ * @queue: CLDMA queue.
+ * @skb: Socket buffer.
+ *
+ * Return:
+ ** 0		- Packet consumed.
+ ** -ERROR	- Failed to process skb.
+ */
+int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb)
+{
+	struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
+	struct port_proxy *port_prox = t7xx_dev->md->port_prox;
+	const struct t7xx_port_conf *port_conf;
+	struct t7xx_port *port;
+	int ret;
+
+	port = &port_prox->ports[0];
+	if (WARN_ON_ONCE(port->port_conf->rxq_index != queue->index)) {
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
+
+	port_conf = port->port_conf;
+	ret = port_conf->ops->recv_skb(port, skb);
+	if (ret < 0 && ret != -ENOBUFS) {
+		dev_err(port->dev, "drop on RX ch %d, %d\n", port_conf->rx_ch, ret);
+		dev_kfree_skb_any(skb);
+	}
+
+	return ret;
+}
+
 static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
 						   struct cldma_queue *queue, u16 channel)
 {
@@ -359,7 +417,7 @@ static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev
 ** 0		- Packet consumed.
 ** -ERROR	- Failed to process skb.
 */
-static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
+int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
 {
 	struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
 	struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
@@ -444,33 +502,54 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
 		spin_lock_init(&port->port_update_lock);
 		port->chan_enable = false;
 
-		if (port_conf->ops->init)
+		if (port_conf->ops && port_conf->ops->init)
 			port_conf->ops->init(port);
 	}
 
 	t7xx_proxy_setup_ch_mapping(port_prox);
 }
 
+void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id)
+{
+	struct port_proxy *port_prox = md->port_prox;
+	const struct t7xx_port_conf *port_conf;
+	u32 port_count;
+	int i;
+
+	t7xx_port_proxy_uninit(port_prox);
+
+	if (cfg_id == PORT_CFG_ID_EARLY) {
+		port_conf = t7xx_early_port_conf;
+		port_count = ARRAY_SIZE(t7xx_early_port_conf);
+	} else {
+		port_conf = t7xx_port_conf;
+		port_count = ARRAY_SIZE(t7xx_port_conf);
+	}
+
+	for (i = 0; i < port_count; i++)
+		port_prox->ports[i].port_conf = &port_conf[i];
+
+	port_prox->cfg_id = cfg_id;
+	port_prox->port_count = port_count;
+
+	t7xx_proxy_init_all_ports(md);
+}
+
 static int t7xx_proxy_alloc(struct t7xx_modem *md)
 {
-	unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
 	struct device *dev = &md->t7xx_dev->pdev->dev;
 	struct port_proxy *port_prox;
-	int i;
 
-	port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count,
+	port_prox = devm_kzalloc(dev, sizeof(*port_prox) +
+				 sizeof(struct t7xx_port) * T7XX_MAX_POSSIBLE_PORTS_NUM,
 				 GFP_KERNEL);
 	if (!port_prox)
 		return -ENOMEM;
 
 	md->port_prox = port_prox;
 	port_prox->dev = dev;
+	t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
 
-	for (i = 0; i < port_count; i++)
-		port_prox->ports[i].port_conf = &t7xx_port_conf[i];
-
-	port_prox->port_count = port_count;
-	t7xx_proxy_init_all_ports(md);
 	return 0;
 }
 
@@ -492,8 +571,6 @@ int t7xx_port_proxy_init(struct t7xx_modem *md)
 	if (ret)
 		return ret;
 
-	t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
-	t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
 	return 0;
 }
 
@@ -505,7 +582,7 @@ void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
 	for_each_proxy_port(i, port, port_prox) {
 		const struct t7xx_port_conf *port_conf = port->port_conf;
 
-		if (port_conf->ops->uninit)
+		if (port_conf->ops && port_conf->ops->uninit)
 			port_conf->ops->uninit(port);
 	}
 }
...
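
The proxy is now allocated once, sized for the larger of the two port tables,
and re-targeted at runtime: probe installs the early table and the FSM switches
to the normal one when the device reaches the Linux stage. A sketch of that
lifecycle (assumed call sites, matching the hunks above and the state monitor
changes below):

	t7xx_proxy_alloc(md);	/* installs the early config (PORT_CFG_ID_EARLY) */
	/* ... device reaches the Linux stage ... */
	t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_NORMAL);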
@@ -31,11 +31,18 @@
 #define RX_QUEUE_MAXLEN		32
 #define CTRL_QUEUE_MAXLEN	16
 
+enum port_cfg_id {
+	PORT_CFG_ID_INVALID,
+	PORT_CFG_ID_NORMAL,
+	PORT_CFG_ID_EARLY,
+};
+
 struct port_proxy {
 	int port_count;
 	struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1];
 	struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES];
 	struct device *dev;
+	enum port_cfg_id cfg_id;
 	struct t7xx_port ports[];
 };
@@ -98,5 +105,8 @@ void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int
 int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg);
 int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
 				       bool en_flag);
+void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id);
+int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb);
+int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb);
 
 #endif /* __T7XX_PORT_PROXY_H__ */
...
@@ -152,14 +152,15 @@ static int t7xx_port_wwan_disable_chl(struct t7xx_port *port)
 static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
 {
 	const struct t7xx_port_conf *port_conf = port->port_conf;
-	unsigned int header_len = sizeof(struct ccci_header);
+	unsigned int header_len = sizeof(struct ccci_header), mtu;
 	struct wwan_port_caps caps;
 
 	if (state != MD_STATE_READY)
 		return;
 
 	if (!port->wwan.wwan_port) {
-		caps.frag_len = CLDMA_MTU - header_len;
+		mtu = t7xx_get_port_mtu(port);
+		caps.frag_len = mtu - header_len;
 		caps.headroom_len = header_len;
 		port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
 							&wwan_ops, &caps, port);
...
@@ -101,11 +101,33 @@ enum t7xx_pm_resume_state {
 	PM_RESUME_REG_STATE_L2_EXP,
 };
 
+enum host_event_e {
+	HOST_EVENT_INIT = 0,
+	FASTBOOT_DL_NOTIFY = 0x3,
+};
+
 #define T7XX_PCIE_MISC_DEV_STATUS	0x0d1c
 #define MISC_STAGE_MASK		GENMASK(2, 0)
 #define MISC_RESET_TYPE_PLDR	BIT(26)
 #define MISC_RESET_TYPE_FLDR	BIT(27)
-#define LINUX_STAGE		4
+#define MISC_RESET_TYPE_PLDR	BIT(26)
+#define MISC_LK_EVENT_MASK	GENMASK(11, 8)
+#define HOST_EVENT_MASK		GENMASK(31, 28)
+
+enum lk_event_id {
+	LK_EVENT_NORMAL = 0,
+	LK_EVENT_CREATE_PD_PORT = 1,
+	LK_EVENT_CREATE_POST_DL_PORT = 2,
+	LK_EVENT_RESET = 7,
+};
+
+enum t7xx_device_stage {
+	T7XX_DEV_STAGE_INIT = 0,
+	T7XX_DEV_STAGE_BROM_PRE = 1,
+	T7XX_DEV_STAGE_BROM_POST = 2,
+	T7XX_DEV_STAGE_LK = 3,
+	T7XX_DEV_STAGE_LINUX = 4,
+};
+
 #define T7XX_PCIE_RESOURCE_STATUS	0x0d28
 #define T7XX_PCIE_RESOURCE_STS_MSK	GENMASK(4, 0)
...
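
The device status register now packs three independent fields; a short sketch
of decoding them with the masks above (using FIELD_GET from <linux/bitfield.h>,
as the driver does elsewhere):

	u32 status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	u32 stage = FIELD_GET(MISC_STAGE_MASK, status);       /* enum t7xx_device_stage */
	u32 lk_event = FIELD_GET(MISC_LK_EVENT_MASK, status); /* enum lk_event_id, valid in LK stage */
	u32 host_event = FIELD_GET(HOST_EVENT_MASK, status);  /* enum host_event_e, written by the host */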
@@ -47,6 +47,13 @@
 #define FSM_MD_EX_PASS_TIMEOUT_MS	45000
 #define FSM_CMD_TIMEOUT_MS		2000
 
+#define wait_for_expected_dev_stage(status)	\
+	read_poll_timeout(ioread32, status,	\
+			  ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LINUX) ||	\
+			  ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LK), 100000,	\
+			  20000000, false, IREG_BASE(md->t7xx_dev) +	\
+			  T7XX_PCIE_MISC_DEV_STATUS)
+
 void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
 {
 	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
@@ -206,6 +213,51 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm
 	fsm_finish_command(ctl, cmd, 0);
 }
 
+static void t7xx_host_event_notify(struct t7xx_modem *md, unsigned int event_id)
+{
+	u32 value;
+
+	value = ioread32(IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+	value &= ~HOST_EVENT_MASK;
+	value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
+	iowrite32(value, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+}
+
+static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status)
+{
+	struct t7xx_modem *md = ctl->md;
+	struct cldma_ctrl *md_ctrl;
+	enum lk_event_id lk_event;
+	struct device *dev;
+
+	dev = &md->t7xx_dev->pdev->dev;
+	lk_event = FIELD_GET(MISC_LK_EVENT_MASK, status);
+	switch (lk_event) {
+	case LK_EVENT_NORMAL:
+	case LK_EVENT_RESET:
+		break;
+
+	case LK_EVENT_CREATE_PD_PORT:
+	case LK_EVENT_CREATE_POST_DL_PORT:
+		md_ctrl = md->md_ctrl[CLDMA_ID_AP];
+		t7xx_cldma_hif_hw_init(md_ctrl);
+		t7xx_cldma_stop(md_ctrl);
+		t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG);
+		t7xx_cldma_start(md_ctrl);
+
+		if (lk_event == LK_EVENT_CREATE_POST_DL_PORT)
+			t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DOWNLOAD);
+		else
+			t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DUMP);
+		break;
+
+	default:
+		dev_err(dev, "Invalid LK event %d\n", lk_event);
+		break;
+	}
+}
+
 static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
 {
 	ctl->curr_state = FSM_STATE_STOPPED;
@@ -226,8 +278,9 @@ static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comman
 
 static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
 {
-	struct t7xx_pci_dev *t7xx_dev;
-	struct cldma_ctrl *md_ctrl;
+	struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
+	struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev;
+	enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
 	int err;
 
 	if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
@@ -235,18 +288,20 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma
 		return;
 	}
 
-	md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
-	t7xx_dev = ctl->md->t7xx_dev;
-
 	ctl->curr_state = FSM_STATE_STOPPING;
 	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
 	t7xx_cldma_stop(md_ctrl);
 
-	if (!ctl->md->rgu_irq_asserted) {
-		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
-		/* Wait for the DRM disable to take effect */
-		msleep(FSM_DRM_DISABLE_DELAY_MS);
+	if (mode == T7XX_FASTBOOT_SWITCHING)
+		t7xx_host_event_notify(ctl->md, FASTBOOT_DL_NOTIFY);
 
+	t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
+	/* Wait for the DRM disable to take effect */
+	msleep(FSM_DRM_DISABLE_DELAY_MS);
+
+	if (mode == T7XX_FASTBOOT_SWITCHING) {
+		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
+	} else {
 		err = t7xx_acpi_fldr_func(t7xx_dev);
 		if (err)
 			t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
@@ -318,7 +373,8 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
 static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
 {
 	struct t7xx_modem *md = ctl->md;
-	u32 dev_status;
+	struct device *dev;
+	u32 status;
 	int ret;
 
 	if (!md)
@@ -330,23 +386,53 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
 		return;
 	}
 
+	dev = &md->t7xx_dev->pdev->dev;
 	ctl->curr_state = FSM_STATE_PRE_START;
 	t7xx_md_event_notify(md, FSM_PRE_START);
 
-	ret = read_poll_timeout(ioread32, dev_status,
-				(dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
-				false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+	ret = wait_for_expected_dev_stage(status);
+
 	if (ret) {
-		struct device *dev = &md->t7xx_dev->pdev->dev;
+		dev_err(dev, "read poll timeout %d\n", ret);
+		goto finish_command;
+	}
 
-		fsm_finish_command(ctl, cmd, -ETIMEDOUT);
-		dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
-		return;
+	if (status != ctl->status || cmd->flag != 0) {
+		u32 stage = FIELD_GET(MISC_STAGE_MASK, status);
+
+		switch (stage) {
+		case T7XX_DEV_STAGE_INIT:
+		case T7XX_DEV_STAGE_BROM_PRE:
+		case T7XX_DEV_STAGE_BROM_POST:
+			dev_dbg(dev, "BROM_STAGE Entered\n");
+			ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_START, 0);
+			break;
+
+		case T7XX_DEV_STAGE_LK:
+			dev_dbg(dev, "LK_STAGE Entered\n");
+			t7xx_lk_stage_event_handling(ctl, status);
+			break;
+
+		case T7XX_DEV_STAGE_LINUX:
+			dev_dbg(dev, "LINUX_STAGE Entered\n");
+			t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM |
+					     D2H_INT_ASYNC_MD_HK | D2H_INT_ASYNC_AP_HK);
+			if (cmd->flag == 0)
+				break;
+
+			t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
+			t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
+			t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_NORMAL);
+			ret = fsm_routine_starting(ctl);
+			break;
+
+		default:
+			break;
+		}
+
+		ctl->status = status;
 	}
 
-	t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
-	t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
-	fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
+finish_command:
+	fsm_finish_command(ctl, cmd, ret);
 }
 
 static int fsm_main_thread(void *data)
@@ -518,6 +604,7 @@ void t7xx_fsm_reset(struct t7xx_modem *md)
 	fsm_flush_event_cmd_qs(ctl);
 	ctl->curr_state = FSM_STATE_STOPPED;
 	ctl->exp_flg = false;
+	ctl->status = T7XX_DEV_STAGE_INIT;
 }
 
 int t7xx_fsm_init(struct t7xx_modem *md)
...
@@ -96,6 +96,7 @@ struct t7xx_fsm_ctl {
 	bool exp_flg;
 	spinlock_t notifier_lock;	/* Protects notifier list */
 	struct list_head notifier_list;
+	u32 status;			/* Device boot stage */
 };
 
 struct t7xx_fsm_event {
...