Commit e2dbda0f authored by David S. Miller

Merge branch 'am65-cpsw-suspend-resume'

Roger Quadros says:

====================
net: ethernet: ti: am65-cpsw: Add suspend/resume support

This series adds PM_SLEEP (suspend/resume) support to
the am65-cpsw network driver.

Dual-emac and switch modes are tested to work with suspend/resume
on the AM62-SK platform.

It can be verified on the following branch:
https://github.com/rogerq/linux/commits/for-v6.2/am62-cpsw-lpm-1.0
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4e0243e7 1af3cb37
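
For readers new to the pattern, the shape of the change is the standard platform-driver system-sleep hookup: the suspend handler detaches the netdevs, stops any running ports and saves hardware context (ALE table, port VLAN registers, CPTS state); the resume handler restores that context, reopens the running ports and reattaches the netdevs; and the handlers are wired up through a dev_pm_ops with SET_SYSTEM_SLEEP_PM_OPS. The following is only a minimal, hypothetical sketch of that shape for a single-port driver; every foo_* identifier is a placeholder and not an am65-cpsw symbol, and the stub ndo callbacks stand in for the driver's real .ndo_open/.ndo_stop.

/*
 * Hypothetical sketch of the suspend/resume plumbing this series adds.
 * All foo_* names are placeholders; only the overall shape (stop/open
 * under rtnl, detach/attach, dev_pm_ops wiring) mirrors the patches below.
 */
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtnetlink.h>

struct foo_priv {
	struct net_device *ndev;
};

/* Stand-ins for the driver's real .ndo_open/.ndo_stop callbacks. */
static int foo_ndo_open(struct net_device *ndev)
{
	/* re-init DMA channels, request IRQs, start queues */
	return 0;
}

static int foo_ndo_stop(struct net_device *ndev)
{
	/* stop queues, release IRQs and DMA channels */
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	/* save hardware context (ALE table, port VLAN, CPTS) before sleep */
	netif_device_detach(priv->ndev);
	if (netif_running(priv->ndev)) {
		rtnl_lock();
		/* call ndo_stop directly so netif_running() stays true for resume */
		foo_ndo_stop(priv->ndev);
		rtnl_unlock();
	}
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	/* restore saved hardware context, then bring running ports back up */
	if (netif_running(priv->ndev)) {
		rtnl_lock();
		foo_ndo_open(priv->ndev);
		rtnl_unlock();
	}
	netif_device_attach(priv->ndev);
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

The actual patches call the driver's own am65_cpsw_nuss_ndo_slave_stop()/am65_cpsw_nuss_ndo_slave_open() under rtnl rather than dev_close()/dev_open(), presumably for the same reason the sketch does: going through dev_close() would clear the interface's running state, so the resume path would no longer know which ports to reopen.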
......@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/rtnetlink.h>
#include <linux/mfd/syscon.h>
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
......@@ -132,6 +133,11 @@
NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common);
static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common);
static void am65_cpsw_nuss_free_tx_chns(struct am65_cpsw_common *common);
static void am65_cpsw_nuss_free_rx_chns(struct am65_cpsw_common *common);
static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
const u8 *dev_addr)
{
......@@ -373,6 +379,20 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
if (common->usage_count)
return 0;
/* init tx/rx channels */
ret = am65_cpsw_nuss_init_tx_chns(common);
if (ret) {
dev_err(common->dev, "init_tx_chns failed\n");
return ret;
}
ret = am65_cpsw_nuss_init_rx_chns(common);
if (ret) {
dev_err(common->dev, "init_rx_chns failed\n");
am65_cpsw_nuss_free_tx_chns(common);
return ret;
}
/* Control register */
writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
......@@ -401,6 +421,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
/* disable priority elevation */
writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);
cpsw_ale_control_set(common->ale, 0, ALE_CLEAR, 1);
cpsw_ale_start(common->ale);
/* limit to one RX flow only */
......@@ -432,7 +453,8 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
GFP_KERNEL);
if (!skb) {
dev_err(common->dev, "cannot allocate skb\n");
return -ENOMEM;
ret = -ENOMEM;
goto err;
}
ret = am65_cpsw_nuss_rx_push(common, skb);
......@@ -441,7 +463,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
"cannot submit skb to channel rx, error %d\n",
ret);
kfree_skb(skb);
return ret;
goto err;
}
kmemleak_not_leak(skb);
}
......@@ -450,7 +472,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
for (i = 0; i < common->tx_ch_num; i++) {
ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
if (ret)
return ret;
goto err;
napi_enable(&common->tx_chns[i].napi_tx);
}
......@@ -462,6 +484,12 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
err:
am65_cpsw_nuss_free_tx_chns(common);
am65_cpsw_nuss_free_rx_chns(common);
return ret;
}
static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
......@@ -515,6 +543,9 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
am65_cpsw_nuss_free_tx_chns(common);
am65_cpsw_nuss_free_rx_chns(common);
dev_dbg(common->dev, "cpsw_nuss stopped\n");
return 0;
}
......@@ -555,11 +586,29 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret, i;
u32 reg;
int tmo;
ret = pm_runtime_resume_and_get(common->dev);
if (ret < 0)
return ret;
/* Idle MAC port */
cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
dev_info(common->dev, "down msc_sl %08x tmo %d\n",
cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);
cpsw_sl_ctl_reset(port->slave.mac_sl);
/* soft reset MAC */
cpsw_sl_reg_write(port->slave.mac_sl, CPSW_SL_SOFT_RESET, 1);
mdelay(1);
reg = cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_SOFT_RESET);
if (reg)
dev_info(common->dev, "mac reset not yet done\n");
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
if (ret) {
......@@ -1491,9 +1540,9 @@ static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
cpsw_sl_ctl_reset(port->slave.mac_sl);
}
static void am65_cpsw_nuss_free_tx_chns(void *data)
static void am65_cpsw_nuss_free_tx_chns(struct am65_cpsw_common *common)
{
struct am65_cpsw_common *common = data;
struct device *dev = common->dev;
int i;
for (i = 0; i < common->tx_ch_num; i++) {
......@@ -1505,7 +1554,11 @@ static void am65_cpsw_nuss_free_tx_chns(void *data)
if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
memset(tx_chn, 0, sizeof(*tx_chn));
/* Don't clear tx_chn memory as we need to preserve
* data between suspend/resume
*/
if (!(tx_chn->irq < 0))
devm_free_irq(dev, tx_chn->irq, tx_chn);
}
}
......@@ -1514,12 +1567,10 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
struct device *dev = common->dev;
int i;
devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
if (tx_chn->irq)
if (!(tx_chn->irq < 0))
devm_free_irq(dev, tx_chn->irq, tx_chn);
netif_napi_del(&tx_chn->napi_tx);
......@@ -1589,7 +1640,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
}
tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
if (tx_chn->irq <= 0) {
if (tx_chn->irq < 0) {
dev_err(dev, "Failed to get tx dma irq %d\n",
tx_chn->irq);
goto err;
......@@ -1598,25 +1649,36 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
snprintf(tx_chn->tx_chn_name,
sizeof(tx_chn->tx_chn_name), "%s-tx%d",
dev_name(dev), tx_chn->id);
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
IRQF_TRIGGER_HIGH,
tx_chn->tx_chn_name, tx_chn);
if (ret) {
dev_err(dev, "failure requesting tx%u irq %u, %d\n",
tx_chn->id, tx_chn->irq, ret);
tx_chn->irq = -EINVAL;
goto err;
}
}
return 0;
err:
i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
if (i) {
dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
return i;
}
am65_cpsw_nuss_free_tx_chns(common);
return ret;
}
static void am65_cpsw_nuss_free_rx_chns(void *data)
static void am65_cpsw_nuss_free_rx_chns(struct am65_cpsw_common *common)
{
struct am65_cpsw_common *common = data;
struct am65_cpsw_rx_chn *rx_chn;
rx_chn = &common->rx_chns;
if (!(rx_chn->irq < 0))
devm_free_irq(common->dev, rx_chn->irq, common);
if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
......@@ -1639,7 +1701,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
rx_cfg.flow_id_base = common->rx_flow_id_base;
rx_cfg.flow_id_base = -1;
/* init all flows */
rx_chn->dev = dev;
......@@ -1711,13 +1773,21 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
}
}
err:
i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
if (i) {
dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
return i;
ret = devm_request_irq(dev, rx_chn->irq,
am65_cpsw_nuss_rx_irq,
IRQF_TRIGGER_HIGH, dev_name(dev), common);
if (ret) {
dev_err(dev, "failure requesting rx irq %u, %d\n",
rx_chn->irq, ret);
rx_chn->irq = -EINVAL;
goto err;
}
return 0;
err:
am65_cpsw_nuss_free_rx_chns(common);
return ret;
}
......@@ -1982,6 +2052,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
port->slave.phylink_config.dev = &port->ndev->dev;
port->slave.phylink_config.type = PHYLINK_NETDEV;
port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
port->slave.phylink_config.mac_managed_pm = true; /* MAC does PM */
if (phy_interface_mode_is_rgmii(port->slave.phy_if)) {
phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
......@@ -2043,28 +2114,16 @@ static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
int i, ret = 0;
int i;
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
am65_cpsw_nuss_tx_poll);
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
IRQF_TRIGGER_HIGH,
tx_chn->tx_chn_name, tx_chn);
if (ret) {
dev_err(dev, "failure requesting tx%u irq %u, %d\n",
tx_chn->id, tx_chn->irq, ret);
goto err;
}
}
err:
return ret;
return 0;
}
static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
......@@ -2533,15 +2592,6 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
if (ret)
return ret;
ret = devm_request_irq(dev, common->rx_chns.irq,
am65_cpsw_nuss_rx_irq,
IRQF_TRIGGER_HIGH, dev_name(dev), common);
if (ret) {
dev_err(dev, "failure requesting rx irq %u, %d\n",
common->rx_chns.irq, ret);
return ret;
}
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
return ret;
......@@ -2665,6 +2715,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct clk *clk;
u64 id_temp;
int ret, i;
int ale_entries;
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
if (!common)
......@@ -2695,7 +2746,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
return -ENOENT;
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
common->tx_ch_num = 1;
common->pf_p0_rx_ptype_rrobin = false;
......@@ -2737,14 +2787,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
am65_cpsw_nuss_get_ver(common);
/* init tx channels */
ret = am65_cpsw_nuss_init_tx_chns(common);
if (ret)
goto err_of_clear;
ret = am65_cpsw_nuss_init_rx_chns(common);
if (ret)
goto err_of_clear;
ret = am65_cpsw_nuss_init_host_p(common);
if (ret)
goto err_of_clear;
......@@ -2768,6 +2810,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
goto err_of_clear;
}
ale_entries = common->ale->params.ale_entries;
common->ale_context = devm_kzalloc(dev,
ale_entries * ALE_ENTRY_WORDS * sizeof(u32),
GFP_KERNEL);
ret = am65_cpsw_init_cpts(common);
if (ret)
goto err_of_clear;
......@@ -2829,10 +2875,89 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int am65_cpsw_nuss_suspend(struct device *dev)
{
struct am65_cpsw_common *common = dev_get_drvdata(dev);
struct am65_cpsw_port *port;
struct net_device *ndev;
int i, ret;
struct am65_cpsw_host *host_p = am65_common_get_host(common);
cpsw_ale_dump(common->ale, common->ale_context);
host_p->vid_context = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
ndev = port->ndev;
if (!ndev)
continue;
port->vid_context = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
netif_device_detach(ndev);
if (netif_running(ndev)) {
rtnl_lock();
ret = am65_cpsw_nuss_ndo_slave_stop(ndev);
rtnl_unlock();
if (ret < 0) {
netdev_err(ndev, "failed to stop: %d", ret);
return ret;
}
}
}
am65_cpts_suspend(common->cpts);
return 0;
}
static int am65_cpsw_nuss_resume(struct device *dev)
{
struct am65_cpsw_common *common = dev_get_drvdata(dev);
struct am65_cpsw_port *port;
struct net_device *ndev;
int i, ret;
struct am65_cpsw_host *host_p = am65_common_get_host(common);
am65_cpts_resume(common->cpts);
for (i = 0; i < common->port_num; i++) {
port = &common->ports[i];
ndev = port->ndev;
if (!ndev)
continue;
if (netif_running(ndev)) {
rtnl_lock();
ret = am65_cpsw_nuss_ndo_slave_open(ndev);
rtnl_unlock();
if (ret < 0) {
netdev_err(ndev, "failed to start: %d", ret);
return ret;
}
}
netif_device_attach(ndev);
writel(port->vid_context, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
}
writel(host_p->vid_context, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
cpsw_ale_restore(common->ale, common->ale_context);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend, am65_cpsw_nuss_resume)
};
static struct platform_driver am65_cpsw_nuss_driver = {
.driver = {
.name = AM65_CPSW_DRV_NAME,
.of_match_table = am65_cpsw_nuss_of_mtable,
.pm = &am65_cpsw_nuss_dev_pm_ops,
},
.probe = am65_cpsw_nuss_probe,
.remove = am65_cpsw_nuss_remove,
......
......@@ -55,12 +55,16 @@ struct am65_cpsw_port {
bool rx_ts_enabled;
struct am65_cpsw_qos qos;
struct devlink_port devlink_port;
/* Only for suspend resume context */
u32 vid_context;
};
struct am65_cpsw_host {
struct am65_cpsw_common *common;
void __iomem *port_base;
void __iomem *stat_base;
/* Only for suspend resume context */
u32 vid_context;
};
struct am65_cpsw_tx_chn {
......@@ -145,6 +149,8 @@ struct am65_cpsw_common {
struct net_device *hw_bridge_dev;
struct notifier_block am65_cpsw_netdevice_nb;
unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN];
/* only for suspend/resume context restore */
u32 *ale_context;
};
struct am65_cpsw_ndev_stats {
......
......@@ -176,6 +176,16 @@ struct am65_cpts {
u32 genf_enable;
u32 hw_ts_enable;
struct sk_buff_head txq;
/* context save/restore */
u64 sr_cpts_ns;
u64 sr_ktime_ns;
u32 sr_control;
u32 sr_int_enable;
u32 sr_rftclk_sel;
u32 sr_ts_ppm_hi;
u32 sr_ts_ppm_low;
struct am65_genf_regs sr_genf[AM65_CPTS_GENF_MAX_NUM];
struct am65_genf_regs sr_estf[AM65_CPTS_ESTF_MAX_NUM];
};
struct am65_cpts_skb_cb_data {
......@@ -1029,6 +1039,72 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
}
EXPORT_SYMBOL_GPL(am65_cpts_create);
void am65_cpts_suspend(struct am65_cpts *cpts)
{
/* save state and disable CPTS */
cpts->sr_control = am65_cpts_read32(cpts, control);
cpts->sr_int_enable = am65_cpts_read32(cpts, int_enable);
cpts->sr_rftclk_sel = am65_cpts_read32(cpts, rftclk_sel);
cpts->sr_ts_ppm_hi = am65_cpts_read32(cpts, ts_ppm_hi);
cpts->sr_ts_ppm_low = am65_cpts_read32(cpts, ts_ppm_low);
cpts->sr_cpts_ns = am65_cpts_gettime(cpts, NULL);
cpts->sr_ktime_ns = ktime_to_ns(ktime_get_real());
am65_cpts_disable(cpts);
clk_disable(cpts->refclk);
/* Save GENF state */
memcpy_fromio(&cpts->sr_genf, &cpts->reg->genf, sizeof(cpts->sr_genf));
/* Save ESTF state */
memcpy_fromio(&cpts->sr_estf, &cpts->reg->estf, sizeof(cpts->sr_estf));
}
EXPORT_SYMBOL_GPL(am65_cpts_suspend);
void am65_cpts_resume(struct am65_cpts *cpts)
{
int i;
s64 ktime_ns;
/* restore state and enable CPTS */
clk_enable(cpts->refclk);
am65_cpts_write32(cpts, cpts->sr_rftclk_sel, rftclk_sel);
am65_cpts_set_add_val(cpts);
am65_cpts_write32(cpts, cpts->sr_control, control);
am65_cpts_write32(cpts, cpts->sr_int_enable, int_enable);
/* Restore time to saved CPTS time + time in suspend/resume */
ktime_ns = ktime_to_ns(ktime_get_real());
ktime_ns -= cpts->sr_ktime_ns;
am65_cpts_settime(cpts, cpts->sr_cpts_ns + ktime_ns);
/* Restore compensation (PPM) */
am65_cpts_write32(cpts, cpts->sr_ts_ppm_hi, ts_ppm_hi);
am65_cpts_write32(cpts, cpts->sr_ts_ppm_low, ts_ppm_low);
/* Restore GENF state */
for (i = 0; i < AM65_CPTS_GENF_MAX_NUM; i++) {
am65_cpts_write32(cpts, 0, genf[i].length); /* TRM sequence */
am65_cpts_write32(cpts, cpts->sr_genf[i].comp_hi, genf[i].comp_hi);
am65_cpts_write32(cpts, cpts->sr_genf[i].comp_lo, genf[i].comp_lo);
am65_cpts_write32(cpts, cpts->sr_genf[i].length, genf[i].length);
am65_cpts_write32(cpts, cpts->sr_genf[i].control, genf[i].control);
am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_hi, genf[i].ppm_hi);
am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_low, genf[i].ppm_low);
}
/* Restore ESTTF state */
for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) {
am65_cpts_write32(cpts, 0, estf[i].length); /* TRM sequence */
am65_cpts_write32(cpts, cpts->sr_estf[i].comp_hi, estf[i].comp_hi);
am65_cpts_write32(cpts, cpts->sr_estf[i].comp_lo, estf[i].comp_lo);
am65_cpts_write32(cpts, cpts->sr_estf[i].length, estf[i].length);
am65_cpts_write32(cpts, cpts->sr_estf[i].control, estf[i].control);
am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_hi, estf[i].ppm_hi);
am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_low, estf[i].ppm_low);
}
}
EXPORT_SYMBOL_GPL(am65_cpts_resume);
static int am65_cpts_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
......
......@@ -28,6 +28,8 @@ u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
struct am65_cpts_estf_cfg *cfg);
void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
void am65_cpts_suspend(struct am65_cpts *cpts);
void am65_cpts_resume(struct am65_cpts *cpts);
#else
static inline struct am65_cpts *am65_cpts_create(struct device *dev,
void __iomem *regs,
......@@ -69,6 +71,14 @@ static inline int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
static inline void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
{
}
static inline void am65_cpts_suspend(struct am65_cpts *cpts)
{
}
static inline void am65_cpts_resume(struct am65_cpts *cpts)
{
}
#endif
#endif /* K3_CPTS_H_ */
......@@ -1452,6 +1452,16 @@ void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
}
}
void cpsw_ale_restore(struct cpsw_ale *ale, u32 *data)
{
int i;
for (i = 0; i < ale->params.ale_entries; i++) {
cpsw_ale_write(ale, i, data);
data += ALE_ENTRY_WORDS;
}
}
u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale)
{
return ale ? ale->params.ale_entries : 0;
......
......@@ -127,6 +127,7 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
int control, int value);
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data);
void cpsw_ale_restore(struct cpsw_ale *ale, u32 *data);
u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale);
static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
......