Commit 5b65781d authored by Vignesh Raghavendra, committed by Vinod Koul

dmaengine: ti: k3-udma-glue: Add support for K3 PKTDMA

This commit adds support for PKTDMA in the k3-udma glue driver. Use the new
psil_endpoint_config struct to get static data for a given channel or flow
during setup. Make sure that the RX flows being mapped to an RX channel are
within the range of flows that has been allocated to that RX channel.
Signed-off-by: Vignesh Raghavendra <vigneshr@ti.com>
Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Link: https://lore.kernel.org/r/20201208090440.31792-21-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent d2abc982
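For context, a minimal client-side sketch of how the new address-translation helpers added by this patch are meant to be used with PKTDMA. The helper and descriptor APIs (k3_udma_glue_tx_dma_to_cppi5_addr(), cppi5_hdesc_attach_buf(), k3_udma_glue_push_tx_chn()) are existing or patch-introduced kernel interfaces; the example_tx_push() wrapper itself is a hypothetical illustration modeled on existing glue-layer users, not part of the change:

/*
 * Hypothetical client sketch (not part of this patch): with PKTDMA and a
 * non-zero ASEL value, DMA addresses handed to the hardware carry the ASEL
 * bits in the upper address field, so a client is expected to translate
 * every buffer/descriptor address with the new helpers before pushing a
 * descriptor (and back again after popping one).
 */
#include <linux/dma/k3-udma-glue.h>
#include <linux/dma/ti-cppi5.h>

static int example_tx_push(struct k3_udma_glue_tx_channel *tx_chn,
			   struct cppi5_host_desc_t *desc, dma_addr_t desc_dma,
			   dma_addr_t buf_dma, u32 pkt_len)
{
	/* Add the channel ASEL bits to the buffer and descriptor addresses */
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn, &buf_dma);
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn, &desc_dma);
	cppi5_hdesc_attach_buf(desc, buf_dma, pkt_len, buf_dma, pkt_len);

	/* Queue the descriptor on the TX ring */
	return k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);
}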
@@ -22,6 +22,7 @@
 struct k3_udma_glue_common {
 	struct device *dev;
+	struct device chan_dev;
 	struct udma_dev *udmax;
 	const struct udma_tisci_rm *tisci_rm;
 	struct k3_ringacc *ringacc;
@@ -32,7 +33,8 @@ struct k3_udma_glue_common {
 	bool epib;
 	u32 psdata_size;
 	u32 swdata_size;
-	u32 atype;
+	u32 atype_asel;
+	struct psil_endpoint_config *ep_config;
 };
 
 struct k3_udma_glue_tx_channel {
@@ -53,6 +55,8 @@ struct k3_udma_glue_tx_channel {
 	bool tx_filt_einfo;
 	bool tx_filt_pswords;
 	bool tx_supr_tdpkt;
+
+	int udma_tflow_id;
 };
 
 struct k3_udma_glue_rx_flow {
@@ -81,6 +85,16 @@ struct k3_udma_glue_rx_channel {
 	u32 flows_ready;
 };
 
+static void k3_udma_chan_dev_release(struct device *dev)
+{
+	/* The struct containing the device is devm managed */
+}
+
+static struct class k3_udma_glue_devclass = {
+	.name = "k3_udma_glue_chan",
+	.dev_release = k3_udma_chan_dev_release,
+};
+
 #define K3_UDMAX_TDOWN_TIMEOUT_US 1000
 
 static int of_k3_udma_glue_parse(struct device_node *udmax_np,
@@ -100,7 +114,6 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
 		const char *name, struct k3_udma_glue_common *common,
 		bool tx_chn)
 {
-	struct psil_endpoint_config *ep_config;
 	struct of_phandle_args dma_spec;
 	u32 thread_id;
 	int ret = 0;
@@ -117,15 +130,26 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
 			       &dma_spec))
 		return -ENOENT;
 
+	ret = of_k3_udma_glue_parse(dma_spec.np, common);
+	if (ret)
+		goto out_put_spec;
+
 	thread_id = dma_spec.args[0];
 	if (dma_spec.args_count == 2) {
-		if (dma_spec.args[1] > 2) {
+		if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
 			dev_err(common->dev, "Invalid channel atype: %u\n",
 				dma_spec.args[1]);
 			ret = -EINVAL;
 			goto out_put_spec;
 		}
-		common->atype = dma_spec.args[1];
+		if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
+			dev_err(common->dev, "Invalid channel asel: %u\n",
+				dma_spec.args[1]);
+			ret = -EINVAL;
+			goto out_put_spec;
+		}
+
+		common->atype_asel = dma_spec.args[1];
 	}
 
 	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
@@ -139,25 +163,23 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
 	}
 
 	/* get psil endpoint config */
-	ep_config = psil_get_ep_config(thread_id);
-	if (IS_ERR(ep_config)) {
+	common->ep_config = psil_get_ep_config(thread_id);
+	if (IS_ERR(common->ep_config)) {
 		dev_err(common->dev,
 			"No configuration for psi-l thread 0x%04x\n",
 			thread_id);
-		ret = PTR_ERR(ep_config);
+		ret = PTR_ERR(common->ep_config);
 		goto out_put_spec;
 	}
 
-	common->epib = ep_config->needs_epib;
-	common->psdata_size = ep_config->psd_size;
+	common->epib = common->ep_config->needs_epib;
+	common->psdata_size = common->ep_config->psd_size;
 
 	if (tx_chn)
 		common->dst_thread = thread_id;
 	else
 		common->src_thread = thread_id;
 
-	ret = of_k3_udma_glue_parse(dma_spec.np, common);
-
 out_put_spec:
 	of_node_put(dma_spec.np);
 	return ret;
@@ -223,7 +245,7 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
 		req.tx_supr_tdpkt = 1;
 	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
 	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
-	req.tx_atype = tx_chn->common.atype;
+	req.tx_atype = tx_chn->common.atype_asel;
 
 	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
 }
@@ -255,8 +277,14 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
 				    tx_chn->common.psdata_size,
 				    tx_chn->common.swdata_size);
 
+	if (xudma_is_pktdma(tx_chn->common.udmax))
+		tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
+	else
+		tx_chn->udma_tchan_id = -1;
+
 	/* request and cfg UDMAP TX channel */
-	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
+	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
+					      tx_chn->udma_tchan_id);
 	if (IS_ERR(tx_chn->udma_tchanx)) {
 		ret = PTR_ERR(tx_chn->udma_tchanx);
 		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
@@ -264,11 +292,34 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
 	}
 	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
 
+	tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+	tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
+	dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
+		     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
+	ret = device_register(&tx_chn->common.chan_dev);
+	if (ret) {
+		dev_err(dev, "Channel Device registration failed %d\n", ret);
+		tx_chn->common.chan_dev.parent = NULL;
+		goto err;
+	}
+
+	if (xudma_is_pktdma(tx_chn->common.udmax)) {
+		/* prepare the channel device as coherent */
+		tx_chn->common.chan_dev.dma_coherent = true;
+		dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
+					     DMA_BIT_MASK(48));
+	}
+
 	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
 
+	if (xudma_is_pktdma(tx_chn->common.udmax))
+		tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
+	else
+		tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;
+
 	/* request and cfg rings */
 	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
-					    tx_chn->udma_tchan_id, -1,
+					    tx_chn->udma_tflow_id, -1,
 					    &tx_chn->ringtx,
 					    &tx_chn->ringtxcq);
 	if (ret) {
@@ -280,6 +331,12 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
 	cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
 	cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;
 
+	/* Set the ASEL value for DMA rings of PKTDMA */
+	if (xudma_is_pktdma(tx_chn->common.udmax)) {
+		cfg->tx_cfg.asel = tx_chn->common.atype_asel;
+		cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
+	}
+
 	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
 	if (ret) {
 		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
@@ -331,6 +388,11 @@ void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
 	if (tx_chn->ringtx)
 		k3_ringacc_ring_free(tx_chn->ringtx);
+
+	if (tx_chn->common.chan_dev.parent) {
+		device_unregister(&tx_chn->common.chan_dev);
+		tx_chn->common.chan_dev.parent = NULL;
+	}
 }
 EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
@@ -443,13 +505,10 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
 			       void *data,
 			       void (*cleanup)(void *data, dma_addr_t desc_dma))
 {
+	struct device *dev = tx_chn->common.dev;
 	dma_addr_t desc_dma;
 	int occ_tx, i, ret;
 
-	/* reset TXCQ as it is not input for udma - expected to be empty */
-	if (tx_chn->ringtxcq)
-		k3_ringacc_ring_reset(tx_chn->ringtxcq);
-
 	/*
 	 * TXQ reset need to be special way as it is input for udma and its
 	 * state cached by udma, so:
@@ -458,17 +517,20 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
 	 * 3) reset TXQ in a special way
 	 */
 	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
-	dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);
+	dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);
 
 	for (i = 0; i < occ_tx; i++) {
 		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
 		if (ret) {
-			dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
+			if (ret != -ENODATA)
+				dev_err(dev, "TX reset pop %d\n", ret);
 			break;
 		}
 		cleanup(data, desc_dma);
 	}
 
+	/* reset TXCQ as it is not input for udma - expected to be empty */
+	k3_ringacc_ring_reset(tx_chn->ringtxcq);
+
 	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
 }
 EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
@@ -487,7 +549,12 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
 
 int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
 {
-	tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+	if (xudma_is_pktdma(tx_chn->common.udmax)) {
+		tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
+							  tx_chn->udma_tflow_id);
+	} else {
+		tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+	}
 
 	return tx_chn->virq;
 }
@@ -496,10 +563,36 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
 struct device *
 k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
 {
+	if (xudma_is_pktdma(tx_chn->common.udmax) &&
+	    (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
+		return &tx_chn->common.chan_dev;
+
 	return xudma_get_device(tx_chn->common.udmax);
 }
 EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);
 
+void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
+				       dma_addr_t *addr)
+{
+	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
+	    !tx_chn->common.atype_asel)
+		return;
+
+	*addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);
+
+void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
+				       dma_addr_t *addr)
+{
+	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
+	    !tx_chn->common.atype_asel)
+		return;
+
+	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);
+
 static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
 {
 	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
@@ -511,8 +604,6 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
 	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
 			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
 			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
-			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
-			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
 			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
 
 	req.nav_id = tisci_rm->tisci_dev_id;
@@ -524,13 +615,16 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
 	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
 	 */
 	req.rxcq_qnum = 0xFFFF;
-	if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
+	if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
+	    rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
 		/* Default flow + extra ones */
+		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
 		req.flowid_start = rx_chn->flow_id_base;
 		req.flowid_cnt = rx_chn->flow_num;
 	}
 	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
-	req.rx_atype = rx_chn->common.atype;
+	req.rx_atype = rx_chn->common.atype_asel;
 
 	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
 	if (ret)
@@ -584,10 +678,18 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
 		goto err_rflow_put;
 	}
 
+	if (xudma_is_pktdma(rx_chn->common.udmax)) {
+		rx_ringfdq_id = flow->udma_rflow_id +
+				xudma_get_rflow_ring_offset(rx_chn->common.udmax);
+		rx_ring_id = 0;
+	} else {
+		rx_ring_id = flow_cfg->ring_rxq_id;
+		rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
+	}
+
 	/* request and cfg rings */
 	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
-					    flow_cfg->ring_rxfdq0_id,
-					    flow_cfg->ring_rxq_id,
+					    rx_ringfdq_id, rx_ring_id,
 					    &flow->ringrxfdq,
 					    &flow->ringrx);
 	if (ret) {
@@ -599,6 +701,12 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
 	flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
 	flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;
 
+	/* Set the ASEL value for DMA rings of PKTDMA */
+	if (xudma_is_pktdma(rx_chn->common.udmax)) {
+		flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
+		flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
+	}
+
 	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
 	if (ret) {
 		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
@@ -757,6 +865,7 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
 				 struct k3_udma_glue_rx_channel_cfg *cfg)
 {
 	struct k3_udma_glue_rx_channel *rx_chn;
+	struct psil_endpoint_config *ep_cfg;
 	int ret, i;
 
 	if (cfg->flow_id_num <= 0)
@@ -784,8 +893,16 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
 				    rx_chn->common.psdata_size,
 				    rx_chn->common.swdata_size);
 
+	ep_cfg = rx_chn->common.ep_config;
+
+	if (xudma_is_pktdma(rx_chn->common.udmax))
+		rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
+	else
+		rx_chn->udma_rchan_id = -1;
+
 	/* request and cfg UDMAP RX channel */
-	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
+	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
+					      rx_chn->udma_rchan_id);
 	if (IS_ERR(rx_chn->udma_rchanx)) {
 		ret = PTR_ERR(rx_chn->udma_rchanx);
 		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
@@ -793,12 +910,48 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
 	}
 	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
 
-	rx_chn->flow_num = cfg->flow_id_num;
-	rx_chn->flow_id_base = cfg->flow_id_base;
+	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+	dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
+		     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
+	ret = device_register(&rx_chn->common.chan_dev);
+	if (ret) {
+		dev_err(dev, "Channel Device registration failed %d\n", ret);
+		rx_chn->common.chan_dev.parent = NULL;
+		goto err;
+	}
 
-	/* Use RX channel id as flow id: target dev can't generate flow_id */
-	if (cfg->flow_id_use_rxchan_id)
-		rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+	if (xudma_is_pktdma(rx_chn->common.udmax)) {
+		/* prepare the channel device as coherent */
+		rx_chn->common.chan_dev.dma_coherent = true;
+		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+					     DMA_BIT_MASK(48));
+	}
+
+	if (xudma_is_pktdma(rx_chn->common.udmax)) {
+		int flow_start = cfg->flow_id_base;
+		int flow_end;
+
+		if (flow_start == -1)
+			flow_start = ep_cfg->flow_start;
+
+		flow_end = flow_start + cfg->flow_id_num - 1;
+		if (flow_start < ep_cfg->flow_start ||
+		    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
+			dev_err(dev, "Invalid flow range requested\n");
+			ret = -EINVAL;
+			goto err;
+		}
+		rx_chn->flow_id_base = flow_start;
+	} else {
+		rx_chn->flow_id_base = cfg->flow_id_base;
+
+		/* Use RX channel id as flow id: target dev can't generate flow_id */
+		if (cfg->flow_id_use_rxchan_id)
+			rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+	}
+
+	rx_chn->flow_num = cfg->flow_id_num;
 
 	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
 				     sizeof(*rx_chn->flows), GFP_KERNEL);
@@ -888,6 +1041,24 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
 		goto err;
 	}
 
+	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
+		     rx_chn->common.src_thread);
+	ret = device_register(&rx_chn->common.chan_dev);
+	if (ret) {
+		dev_err(dev, "Channel Device registration failed %d\n", ret);
+		rx_chn->common.chan_dev.parent = NULL;
+		goto err;
+	}
+
+	if (xudma_is_pktdma(rx_chn->common.udmax)) {
+		/* prepare the channel device as coherent */
+		rx_chn->common.chan_dev.dma_coherent = true;
+		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+					     DMA_BIT_MASK(48));
+	}
+
 	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
 	if (ret)
 		goto err;
@@ -940,6 +1111,11 @@ void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
 	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
 		xudma_rchan_put(rx_chn->common.udmax,
 				rx_chn->udma_rchanx);
+
+	if (rx_chn->common.chan_dev.parent) {
+		device_unregister(&rx_chn->common.chan_dev);
+		rx_chn->common.chan_dev.parent = NULL;
+	}
 }
 EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
@@ -1151,12 +1327,10 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
 	/* reset RXCQ as it is not input for udma - expected to be empty */
 	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
 	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
-	if (flow->ringrx)
-		k3_ringacc_ring_reset(flow->ringrx);
 
 	/* Skip RX FDQ in case one FDQ is used for the set of flows */
 	if (skip_fdq)
-		return;
+		goto do_reset;
 
 	/*
 	 * RX FDQ reset need to be special way as it is input for udma and its
@@ -1171,13 +1345,17 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
 	for (i = 0; i < occ_rx; i++) {
 		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
 		if (ret) {
-			dev_err(dev, "RX reset pop %d\n", ret);
+			if (ret != -ENODATA)
+				dev_err(dev, "RX reset pop %d\n", ret);
 			break;
 		}
 		cleanup(data, desc_dma);
 	}
 
 	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
+
+do_reset:
+	k3_ringacc_ring_reset(flow->ringrx);
 }
 EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
@@ -1207,7 +1385,12 @@ int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
 	flow = &rx_chn->flows[flow_num];
 
-	flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+	if (xudma_is_pktdma(rx_chn->common.udmax)) {
+		flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
+							flow->udma_rflow_id);
+	} else {
+		flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+	}
 
 	return flow->virq;
 }
@@ -1216,6 +1399,38 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
 struct device *
 k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
 {
+	if (xudma_is_pktdma(rx_chn->common.udmax) &&
+	    (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
+		return &rx_chn->common.chan_dev;
+
 	return xudma_get_device(rx_chn->common.udmax);
 }
 EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);
+
+void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
+				       dma_addr_t *addr)
+{
+	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
+	    !rx_chn->common.atype_asel)
+		return;
+
+	*addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);
+
+void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
+				       dma_addr_t *addr)
+{
+	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
+	    !rx_chn->common.atype_asel)
+		return;
+
+	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);
+
+static int __init k3_udma_glue_class_init(void)
+{
+	return class_register(&k3_udma_glue_devclass);
+}
+arch_initcall(k3_udma_glue_class_init);
@@ -157,3 +157,27 @@ void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val) \
 EXPORT_SYMBOL(xudma_##res##rt_write)
 
 XUDMA_RT_IO_FUNCTIONS(tchan);
 XUDMA_RT_IO_FUNCTIONS(rchan);
+
+int xudma_is_pktdma(struct udma_dev *ud)
+{
+	return ud->match_data->type == DMA_TYPE_PKTDMA;
+}
+EXPORT_SYMBOL(xudma_is_pktdma);
+
+int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id)
+{
+	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+
+	return ti_sci_inta_msi_get_virq(ud->dev, udma_tflow_id +
+					oes->pktdma_tchan_flow);
+}
+EXPORT_SYMBOL(xudma_pktdma_tflow_get_irq);
+
+int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id)
+{
+	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+
+	return ti_sci_inta_msi_get_virq(ud->dev, udma_rflow_id +
+					oes->pktdma_rchan_flow);
+}
+EXPORT_SYMBOL(xudma_pktdma_rflow_get_irq);
@@ -157,4 +157,8 @@ void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val);
 bool xudma_rflow_is_gp(struct udma_dev *ud, int id);
 int xudma_get_rflow_ring_offset(struct udma_dev *ud);
 
+int xudma_is_pktdma(struct udma_dev *ud);
+
+int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id);
+int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id);
+
 #endif /* K3_UDMA_H_ */
@@ -43,6 +43,10 @@ u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
 int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
 struct device *
 k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
+				       dma_addr_t *addr);
+void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
+				       dma_addr_t *addr);
 
 enum {
 	K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
@@ -134,5 +138,9 @@ int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
 				 u32 flow_idx);
 struct device *
 k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
+				       dma_addr_t *addr);
+void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
+				       dma_addr_t *addr);
 
 #endif /* K3_UDMA_GLUE_H_ */
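As a usage note on the RX flow-range rule described in the commit message: for PKTDMA the requested flows must fall inside the range the PSI-L endpoint configuration reserves for the channel, and passing flow_id_base = -1 lets the glue layer start at the endpoint's flow_start. A minimal sketch, assuming the standard k3_udma_glue_rx_channel_cfg fields; the example_request_rx() wrapper and the specific swdata_size value are hypothetical, not part of this patch:

/*
 * Hypothetical illustration: request one RX flow and let the glue layer pick
 * the PKTDMA-mapped default flow; the new range check rejects anything
 * outside [ep_cfg->flow_start, ep_cfg->flow_start + ep_cfg->flow_num - 1].
 */
#include <linux/dma/k3-udma-glue.h>

static struct k3_udma_glue_rx_channel *
example_request_rx(struct device *dev, const char *name)
{
	struct k3_udma_glue_rx_channel_cfg rx_cfg = { };

	rx_cfg.swdata_size = 16;	/* example software-data size */
	rx_cfg.flow_id_base = -1;	/* use the endpoint's mapped flow range */
	rx_cfg.flow_id_num = 1;

	return k3_udma_glue_request_rx_chn(dev, name, &rx_cfg);
}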