Commit 802dcb43 authored by Jakub Kicinski

Merge branch 'net-ethernet-ti-am65-cpsw-add-multi-port-support-in-mac-only-mode'

Grygorii Strashko says:

====================
net: ethernet: ti: am65-cpsw: add multi port support in mac-only mode

This series adds multi-port support in mac-only mode (multi MAC mode) to the TI
AM65x CPSW driver, in preparation for enabling support for multi-port devices
such as Main CPSW0 on the K3 J721E SoC or the future CPSW3g on the K3 AM64x SoC.

The multi MAC mode is implemented by configuring every enabled port in "mac-only"
mode (all ingress packets are sent only to the Host port, and egress packets are
directed only to the target Ext. port) and by creating a separate net_device for
every enabled Ext. port.
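
As a rough sketch of what "mac-only" means per port (the helper name below is
made up for illustration; the ALE calls are the ones used by the driver in its
ndo_open path, see the diff):

    /* Hypothetical helper, for illustration only - in the driver this is done
     * directly in am65_cpsw_nuss_ndo_slave_open().
     */
    static void am65_cpsw_port_set_mac_only(struct am65_cpsw_common *common,
                                            struct am65_cpsw_port *port)
    {
            /* forward traffic only between this Ext. port and the Host port */
            cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_MACONLY, 1);
            /* no address learning is needed when the port only talks to the Host */
            cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1);
    }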

This series does not affect existing CPSW2g devices with one Ext. port; the xmit
path changes apply only to multi-port devices, by splitting the xmit path into
one-port and multi-port variants.
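
A minimal sketch of that split at the descriptor push (mirroring the
am65_cpsw_nuss_ndo_slave_xmit() hunk below; tx_chn, first_desc and desc_dma are
assumed to come from the surrounding xmit code):

    if (AM65_CPSW_IS_CPSW2G(common)) {
            /* single Ext. port: the TX channel is used by one ndev only,
             * so the descriptor can be pushed without extra locking
             */
            ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
    } else {
            /* multi-port: several ndevs share the TX channel, so the push
             * is serialized with the per-channel lock
             */
            spin_lock_bh(&tx_chn->lock);
            ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
            spin_unlock_bh(&tx_chn->lock);
    }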

Patches 1-3: Preparation patches to improve K3 CPSW configuration depending on DT
Patches 4-5: Fix VLAN offload for multi MAC mode
Patch 6: Fix a CPTS context loss issue during PM runtime transitions
Patch 7: Fix TX csum offload for multi MAC mode
Patches 8-9: Add multi-port support to the TI AM65x CPSW driver
Patch 10: Handle deferred probe with the new dev_err_probe() API

changes in v3:
 - rebased
 - added Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
 - added Patch 10, which is a minor optimization

changes in v2:
- Patch 8: split the xmit path for one-port and multi-port devices to avoid
  performance losses
- Patch 9: fixed the case when Port 1 is disabled
- Patch 7: added a fix for TX csum offload

v2: https://lore.kernel.org/patchwork/cover/1321608/
v1: https://lore.kernel.org/patchwork/cover/1315766/
====================

Link: https://lore.kernel.org/r/20201030200707.24294-1-grygorii.strashko@ti.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 2c4de211 8fbc2f9e
@@ -241,8 +241,8 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
 	if (!vid)
 		unreg_mcast = port_mask;
 	dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
-	ret = cpsw_ale_add_vlan(common->ale, vid, port_mask,
-				unreg_mcast, port_mask, 0);
+	ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
+				       unreg_mcast, port_mask, 0);
 
 	pm_runtime_put(common->dev);
 	return ret;
@@ -252,6 +252,7 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
 				  __be16 proto, u16 vid)
 {
 	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
 	int ret;
 
 	if (!netif_running(ndev) || !vid)
@@ -264,14 +265,15 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
 	}
 
 	dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
-	ret = cpsw_ale_del_vlan(common->ale, vid, 0);
+	ret = cpsw_ale_del_vlan(common->ale, vid,
+				BIT(port->port_id) | ALE_PORT_HOST);
 
 	pm_runtime_put(common->dev);
 	return ret;
 }
 
-static void am65_cpsw_slave_set_promisc_2g(struct am65_cpsw_port *port,
-					   bool promisc)
+static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
+					bool promisc)
 {
 	struct am65_cpsw_common *common = port->common;
@@ -296,7 +298,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
 	bool promisc;
 
 	promisc = !!(ndev->flags & IFF_PROMISC);
-	am65_cpsw_slave_set_promisc_2g(port, promisc);
+	am65_cpsw_slave_set_promisc(port, promisc);
 
 	if (promisc)
 		return;
@@ -373,7 +375,7 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
 	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
 			 AM65_CPSW_NAV_PS_DATA_SIZE);
-	cppi5_hdesc_attach_buf(desc_rx, 0, 0, buf_dma, skb_tailroom(skb));
+	cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
 	swdata = cppi5_hdesc_get_swdata(desc_rx);
 	*((void **)swdata) = skb;
@@ -426,9 +428,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
 	writel(common->rx_flow_id_base,
 	       host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
 	/* en tx crc offload */
-	if (features & NETIF_F_HW_CSUM)
-		writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
-		       host_p->port_base + AM65_CPSW_P0_REG_CTL);
+	writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, host_p->port_base + AM65_CPSW_P0_REG_CTL);
 
 	am65_cpsw_nuss_set_p0_ptype(common);
@@ -629,13 +629,13 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
 
 	am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
 
-	if (port->slave.mac_only)
+	if (port->slave.mac_only) {
 		/* enable mac-only mode on port */
 		cpsw_ale_control_set(common->ale, port->port_id,
 				     ALE_PORT_MACONLY, 1);
-	if (AM65_CPSW_IS_CPSW2G(common))
 		cpsw_ale_control_set(common->ale, port->port_id,
 				     ALE_PORT_NOLEARN, 1);
+	}
 
 	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
 	cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
@@ -767,7 +767,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 		return ret;
 	}
 
-	if (desc_dma & 0x1) {
+	if (cppi5_desc_is_tdcm(desc_dma)) {
 		dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
 		return 0;
 	}
@@ -911,10 +911,57 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
 	dev_kfree_skb_any(skb);
 }
 
+static struct sk_buff *
+am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
+			       dma_addr_t desc_dma)
+{
+	struct am65_cpsw_ndev_priv *ndev_priv;
+	struct am65_cpsw_ndev_stats *stats;
+	struct cppi5_host_desc_t *desc_tx;
+	struct net_device *ndev;
+	struct sk_buff *skb;
+	void **swdata;
+
+	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+					     desc_dma);
+	swdata = cppi5_hdesc_get_swdata(desc_tx);
+	skb = *(swdata);
+	am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+
+	ndev = skb->dev;
+
+	am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
+
+	ndev_priv = netdev_priv(ndev);
+	stats = this_cpu_ptr(ndev_priv->stats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->tx_packets++;
+	stats->tx_bytes += skb->len;
+	u64_stats_update_end(&stats->syncp);
+
+	return skb;
+}
+
+static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
+				   struct netdev_queue *netif_txq)
+{
+	if (netif_tx_queue_stopped(netif_txq)) {
+		/* Check whether the queue is stopped due to stalled
+		 * tx dma, if the queue is stopped then wake the queue
+		 * as we have free desc for tx
+		 */
+		__netif_tx_lock(netif_txq, smp_processor_id());
+		if (netif_running(ndev) &&
+		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
+			netif_tx_wake_queue(netif_txq);
+
+		__netif_tx_unlock(netif_txq);
+	}
+}
+
 static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
 					   int chn, unsigned int budget)
 {
-	struct cppi5_host_desc_t *desc_tx;
 	struct device *dev = common->dev;
 	struct am65_cpsw_tx_chn *tx_chn;
 	struct netdev_queue *netif_txq;
@@ -923,41 +970,68 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
 	struct sk_buff *skb;
 	dma_addr_t desc_dma;
 	int res, num_tx = 0;
-	void **swdata;
 
 	tx_chn = &common->tx_chns[chn];
 
 	while (true) {
-		struct am65_cpsw_ndev_priv *ndev_priv;
-		struct am65_cpsw_ndev_stats *stats;
-
+		spin_lock(&tx_chn->lock);
 		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+		spin_unlock(&tx_chn->lock);
 		if (res == -ENODATA)
 			break;
 
-		if (desc_dma & 0x1) {
+		if (cppi5_desc_is_tdcm(desc_dma)) {
 			if (atomic_dec_and_test(&common->tdown_cnt))
 				complete(&common->tdown_complete);
 			break;
 		}
 
-		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
-						     desc_dma);
-		swdata = cppi5_hdesc_get_swdata(desc_tx);
-		skb = *(swdata);
-		am65_cpsw_nuss_xmit_free(tx_chn, dev, desc_tx);
-
+		skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
+		total_bytes = skb->len;
 		ndev = skb->dev;
+		napi_consume_skb(skb, budget);
+		num_tx++;
 
-		am65_cpts_tx_timestamp(common->cpts, skb);
+		netif_txq = netdev_get_tx_queue(ndev, chn);
 
-		ndev_priv = netdev_priv(ndev);
-		stats = this_cpu_ptr(ndev_priv->stats);
-		u64_stats_update_begin(&stats->syncp);
-		stats->tx_packets++;
-		stats->tx_bytes += skb->len;
-		u64_stats_update_end(&stats->syncp);
+		netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+
+		am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
+	}
+
+	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
+
+	return num_tx;
+}
+
+static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
+					      int chn, unsigned int budget)
+{
+	struct device *dev = common->dev;
+	struct am65_cpsw_tx_chn *tx_chn;
+	struct netdev_queue *netif_txq;
+	unsigned int total_bytes = 0;
+	struct net_device *ndev;
+	struct sk_buff *skb;
+	dma_addr_t desc_dma;
+	int res, num_tx = 0;
+
+	tx_chn = &common->tx_chns[chn];
+
+	while (true) {
+		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+		if (res == -ENODATA)
+			break;
+
+		if (cppi5_desc_is_tdcm(desc_dma)) {
+			if (atomic_dec_and_test(&common->tdown_cnt))
+				complete(&common->tdown_complete);
+			break;
+		}
+
+		skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
+		ndev = skb->dev;
 		total_bytes += skb->len;
 		napi_consume_skb(skb, budget);
 		num_tx++;
@@ -970,19 +1044,8 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
 
 	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
 
-	if (netif_tx_queue_stopped(netif_txq)) {
-		/* Check whether the queue is stopped due to stalled tx dma,
-		 * if the queue is stopped then wake the queue as
-		 * we have free desc for tx
-		 */
-		__netif_tx_lock(netif_txq, smp_processor_id());
-		if (netif_running(ndev) &&
-		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
-		     MAX_SKB_FRAGS))
-			netif_tx_wake_queue(netif_txq);
-
-		__netif_tx_unlock(netif_txq);
-	}
+	am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
 
 	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
 
 	return num_tx;
@@ -993,8 +1056,11 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
 	struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
 	int num_tx;
 
-	num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id,
-						 budget);
+	if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
+		num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, budget);
+	else
+		num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget);
+
 	num_tx = min(num_tx, budget);
 	if (num_tx < budget) {
 		napi_complete(napi_tx);
@@ -1139,7 +1205,13 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
 	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
 	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
 
-	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+	if (AM65_CPSW_IS_CPSW2G(common)) {
+		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+	} else {
+		spin_lock_bh(&tx_chn->lock);
+		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+		spin_unlock_bh(&tx_chn->lock);
+	}
+
 	if (ret) {
 		dev_err(dev, "can't push desc %d\n", ret);
 		/* inform bql */
@@ -1369,32 +1441,7 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
 	stats->tx_dropped = dev->stats.tx_dropped;
 }
 
-static int am65_cpsw_nuss_ndo_slave_set_features(struct net_device *ndev,
-						 netdev_features_t features)
-{
-	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
-	netdev_features_t changes = features ^ ndev->features;
-	struct am65_cpsw_host *host_p;
-
-	host_p = am65_common_get_host(common);
-
-	if (changes & NETIF_F_HW_CSUM) {
-		bool enable = !!(features & NETIF_F_HW_CSUM);
-
-		dev_info(common->dev, "Turn %s tx-checksum-ip-generic\n",
-			 enable ? "ON" : "OFF");
-		if (enable)
-			writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
-			       host_p->port_base + AM65_CPSW_P0_REG_CTL);
-		else
-			writel(0,
-			       host_p->port_base + AM65_CPSW_P0_REG_CTL);
-	}
-
-	return 0;
-}
-
-static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
+static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
 	.ndo_open = am65_cpsw_nuss_ndo_slave_open,
 	.ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
 	.ndo_start_xmit = am65_cpsw_nuss_ndo_slave_xmit,
@@ -1406,7 +1453,6 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
 	.ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid,
 	.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
 	.ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
-	.ndo_set_features = am65_cpsw_nuss_ndo_slave_set_features,
 	.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
 };
 
@@ -1417,7 +1463,6 @@ static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
 	if (!port->disabled)
 		return;
 
-	common->disabled_ports_mask |= BIT(port->port_id);
 
 	cpsw_ale_control_set(common->ale, port->port_id,
 			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
@@ -1496,6 +1541,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
 		snprintf(tx_chn->tx_chn_name,
 			 sizeof(tx_chn->tx_chn_name), "tx%d", i);
 
+		spin_lock_init(&tx_chn->lock);
 		tx_chn->common = common;
 		tx_chn->id = i;
 		tx_chn->descs_num = max_desc_num;
@@ -1515,9 +1561,8 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
 							    tx_chn->tx_chn_name,
 							    &tx_cfg);
 		if (IS_ERR(tx_chn->tx_chn)) {
-			ret = PTR_ERR(tx_chn->tx_chn);
-			dev_err(dev, "Failed to request tx dma channel %d\n",
-				ret);
+			ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
+					    "Failed to request tx dma channel\n");
 			goto err;
 		}
@@ -1588,8 +1633,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
 	if (IS_ERR(rx_chn->rx_chn)) {
-		ret = PTR_ERR(rx_chn->rx_chn);
-		dev_err(dev, "Failed to request rx dma channel %d\n", ret);
+		ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
+				    "Failed to request rx dma channel\n");
 		goto err;
 	}
@@ -1606,7 +1651,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 		};
 		struct k3_ring_cfg fdqring_cfg = {
 			.elm_size = K3_RINGACC_RING_ELSIZE_8,
-			.mode = K3_RINGACC_RING_MODE_MESSAGE,
 			.flags = K3_RINGACC_RING_SHARED,
 		};
 		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
@@ -1620,6 +1664,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
 		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
 		rx_flow_cfg.rx_cfg.size = max_desc_num;
 		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+		rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
 
 		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
 						i, &rx_flow_cfg);
@@ -1725,6 +1770,13 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
 		return ret;
 	}
 	common->cpts = cpts;
+	/* Forbid PM runtime if CPTS is running.
+	 * K3 CPSWxG modules may completely lose context during ON->OFF
+	 * transitions depending on integration.
+	 * AM65x/J721E MCU CPSW2G: false
+	 * J721E MAIN_CPSW9G: true
+	 */
+	pm_runtime_forbid(dev);
 
 	return 0;
 }
@@ -1778,8 +1830,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
 			return PTR_ERR(port->slave.mac_sl);
 
 		port->disabled = !of_device_is_available(port_np);
-		if (port->disabled)
+		if (port->disabled) {
+			common->disabled_ports_mask |= BIT(port->port_id);
 			continue;
+		}
 
 		port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
 		if (IS_ERR(port->slave.ifphy)) {
@@ -1795,12 +1849,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
 		/* get phy/link info */
 		if (of_phy_is_fixed_link(port_np)) {
 			ret = of_phy_register_fixed_link(port_np);
-			if (ret) {
-				if (ret != -EPROBE_DEFER)
-					dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
-						port_np, ret);
-				return ret;
-			}
+			if (ret)
+				return dev_err_probe(dev, ret,
+						     "failed to register fixed-link phy %pOF\n",
+						     port_np);
 			port->slave.phy_node = of_node_get(port_np);
 		} else {
 			port->slave.phy_node =
@@ -1833,6 +1885,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
 	}
 	of_node_put(node);
 
+	/* is there at least one ext.port */
+	if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
+		dev_err(dev, "No Ext. port are available\n");
+		return -ENODEV;
+	}
+
 	return 0;
 }
@@ -1843,14 +1901,18 @@ static void am65_cpsw_pcpu_stats_free(void *data)
 	free_percpu(stats);
 }
 
-static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
+static int
+am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
 {
 	struct am65_cpsw_ndev_priv *ndev_priv;
 	struct device *dev = common->dev;
 	struct am65_cpsw_port *port;
 	int ret;
 
-	port = am65_common_get_port(common, 1);
+	port = &common->ports[port_idx];
+
+	if (port->disabled)
+		return 0;
 
 	/* alloc netdev */
 	port->ndev = devm_alloc_etherdev_mqs(common->dev,
@@ -1879,7 +1941,7 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
 	port->ndev->features = port->ndev->hw_features |
 			       NETIF_F_HW_VLAN_CTAG_FILTER;
 	port->ndev->vlan_features |= NETIF_F_SG;
-	port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops_2g;
+	port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
 	port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
 
 	/* Disable TX checksum offload by default due to HW bug */
@@ -1892,29 +1954,41 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
 	ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
 				       ndev_priv->stats);
-	if (ret) {
-		dev_err(dev, "Failed to add percpu stat free action %d\n", ret);
-		return ret;
+	if (ret)
+		dev_err(dev, "failed to add percpu stat free action %d\n", ret);
+
+	if (!common->dma_ndev)
+		common->dma_ndev = port->ndev;
+
+	return ret;
+}
+
+static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < common->port_num; i++) {
+		ret = am65_cpsw_nuss_init_port_ndev(common, i);
+		if (ret)
+			return ret;
 	}
 
-	netif_napi_add(port->ndev, &common->napi_rx,
+	netif_napi_add(common->dma_ndev, &common->napi_rx,
 		       am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
 
 	return ret;
 }
 
-static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common)
+static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
 {
 	struct device *dev = common->dev;
-	struct am65_cpsw_port *port;
 	int i, ret = 0;
 
-	port = am65_common_get_port(common, 1);
-
 	for (i = 0; i < common->tx_ch_num; i++) {
 		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
 
-		netif_tx_napi_add(port->ndev, &tx_chn->napi_tx,
+		netif_tx_napi_add(common->dma_ndev, &tx_chn->napi_tx,
 				  am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT);
 
 		ret = devm_request_irq(dev, tx_chn->irq,
@@ -1932,16 +2006,27 @@ static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common)
 	return ret;
 }
 
-static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
+static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
+{
+	struct am65_cpsw_port *port;
+	int i;
+
+	for (i = 0; i < common->port_num; i++) {
+		port = &common->ports[i];
+		if (port->ndev)
+			unregister_netdev(port->ndev);
+	}
+}
+
+static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
 {
 	struct device *dev = common->dev;
 	struct am65_cpsw_port *port;
-	int ret = 0;
+	int ret = 0, i;
 
-	port = am65_common_get_port(common, 1);
-
-	ret = am65_cpsw_nuss_ndev_add_napi_2g(common);
+	ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
 	if (ret)
-		goto err;
+		return ret;
 
 	ret = devm_request_irq(dev, common->rx_chns.irq,
 			       am65_cpsw_nuss_rx_irq,
@@ -1949,17 +2034,31 @@ static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
 	if (ret) {
 		dev_err(dev, "failure requesting rx irq %u, %d\n",
 			common->rx_chns.irq, ret);
-		goto err;
+		return ret;
+	}
+
+	for (i = 0; i < common->port_num; i++) {
+		port = &common->ports[i];
+
+		if (!port->ndev)
+			continue;
+
+		ret = register_netdev(port->ndev);
+		if (ret) {
+			dev_err(dev, "error registering slave net device%i %d\n",
+				i, ret);
+			goto err_cleanup_ndev;
+		}
 	}
 
-	ret = register_netdev(port->ndev);
-	if (ret)
-		dev_err(dev, "error registering slave net device %d\n", ret);
-
 	/* can't auto unregister ndev using devm_add_action() due to
 	 * devres release sequence in DD core for DMA
 	 */
-err:
+	return 0;
+
+err_cleanup_ndev:
+	am65_cpsw_nuss_cleanup_ndev(common);
 	return ret;
 }
 
@@ -1972,19 +2071,7 @@ int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
 	if (ret)
 		return ret;
 
-	return am65_cpsw_nuss_ndev_add_napi_2g(common);
-}
-
-static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
-{
-	struct am65_cpsw_port *port;
-	int i;
-
-	for (i = 0; i < common->port_num; i++) {
-		port = &common->ports[i];
-		if (port->ndev)
-			unregister_netdev(port->ndev);
-	}
+	return am65_cpsw_nuss_ndev_add_tx_napi(common);
 }
 
 struct am65_cpsw_soc_pdata {
@@ -2005,10 +2092,14 @@ static const struct soc_device_attribute am65_cpsw_socinfo[] = {
 
 static const struct am65_cpsw_pdata am65x_sr1_0 = {
 	.quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
+	.ale_dev_id = "am65x-cpsw2g",
+	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
 };
 
 static const struct am65_cpsw_pdata j721e_pdata = {
 	.quirks = 0,
+	.ale_dev_id = "am65x-cpsw2g",
+	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
 };
 
 static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
@@ -2068,9 +2159,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
 		return -ENOENT;
 	of_node_put(node);
 
-	if (common->port_num != 1)
-		return -EOPNOTSUPP;
-
 	common->rx_flow_id_base = -1;
 	init_completion(&common->tdown_complete);
 	common->tx_ch_num = 1;
@@ -2089,13 +2177,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	clk = devm_clk_get(dev, "fck");
-	if (IS_ERR(clk)) {
-		ret = PTR_ERR(clk);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "error getting fck clock %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(clk))
+		return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n");
 	common->bus_freq = clk_get_rate(clk);
 
 	pm_runtime_enable(dev);
@@ -2145,7 +2228,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
 	ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
 	ale_params.ale_ports = common->port_num + 1;
 	ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
-	ale_params.dev_id = "am65x-cpsw2g";
+	ale_params.dev_id = common->pdata.ale_dev_id;
 	ale_params.bus_freq = common->bus_freq;
 
 	common->ale = cpsw_ale_create(&ale_params);
@@ -2165,11 +2248,11 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
 
 	dev_set_drvdata(dev, common);
 
-	ret = am65_cpsw_nuss_init_ndev_2g(common);
+	ret = am65_cpsw_nuss_init_ndevs(common);
 	if (ret)
 		goto err_of_clear;
 
-	ret = am65_cpsw_nuss_ndev_reg_2g(common);
+	ret = am65_cpsw_nuss_register_ndevs(common);
 	if (ret)
 		goto err_of_clear;
...
@@ -11,6 +11,7 @@
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
 #include "am65-cpsw-qos.h"
 
 struct am65_cpts;
@@ -59,6 +60,7 @@ struct am65_cpsw_tx_chn {
 	struct am65_cpsw_common *common;
 	struct k3_cppi_desc_pool *desc_pool;
 	struct k3_udma_glue_tx_channel *tx_chn;
+	spinlock_t lock; /* protect TX rings in multi-port mode */
 	int irq;
 	u32 id;
 	u32 descs_num;
@@ -77,6 +79,8 @@ struct am65_cpsw_rx_chn {
 
 struct am65_cpsw_pdata {
 	u32 quirks;
+	enum k3_ring_mode fdqring_mode;
+	const char *ale_dev_id;
 };
 
 struct am65_cpsw_common {
@@ -91,6 +95,7 @@ struct am65_cpsw_common {
 	struct am65_cpsw_host host;
 	struct am65_cpsw_port *ports;
 	u32 disabled_ports_mask;
+	struct net_device *dma_ndev;
 
 	int usage_count; /* number of opened ports */
 	struct cpsw_ale *ale;
...
@@ -634,8 +634,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag,
 	return 0;
 }
 
-static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
-				     u16 vid, int port_mask)
+static void cpsw_ale_vlan_del_modify_int(struct cpsw_ale *ale, u32 *ale_entry,
+					 u16 vid, int port_mask)
 {
 	int reg_mcast, unreg_mcast;
 	int members, untag;
@@ -644,6 +644,7 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
 			   ALE_ENT_VID_MEMBER_LIST);
 	members &= ~port_mask;
 	if (!members) {
+		cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
 		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
 		return;
 	}
@@ -673,7 +674,7 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
 			   ALE_ENT_VID_MEMBER_LIST, members);
 }
 
-int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask)
 {
 	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
 	int idx;
@@ -684,11 +685,39 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
 
 	cpsw_ale_read(ale, idx, ale_entry);
 
-	if (port_mask) {
-		cpsw_ale_del_vlan_modify(ale, ale_entry, vid, port_mask);
-	} else {
+	cpsw_ale_vlan_del_modify_int(ale, ale_entry, vid, port_mask);
+	cpsw_ale_write(ale, idx, ale_entry);
+
+	return 0;
+}
+
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+{
+	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+	int members, idx;
+
+	idx = cpsw_ale_match_vlan(ale, vid);
+	if (idx < 0)
+		return -ENOENT;
+
+	cpsw_ale_read(ale, idx, ale_entry);
+
+	/* if !port_mask - force remove VLAN (legacy).
+	 * Check if there are other VLAN members ports
+	 * if no - remove VLAN.
+	 * if yes it means same VLAN was added to >1 port in multi port mode, so
+	 * remove port_mask ports from VLAN ALE entry excluding Host port.
+	 */
+	members = cpsw_ale_vlan_get_fld(ale, ale_entry, ALE_ENT_VID_MEMBER_LIST);
+	members &= ~port_mask;
+	if (!port_mask || !members) {
+		/* last port or force remove - remove VLAN */
 		cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
 		cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+	} else {
+		port_mask &= ~ALE_PORT_HOST;
+		cpsw_ale_vlan_del_modify_int(ale, ale_entry, vid, port_mask);
 	}
 
 	cpsw_ale_write(ale, idx, ale_entry);
...
@@ -134,6 +134,7 @@ static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
 
 int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
 			     int untag_mask, int reg_mcast, int unreg_mcast);
+int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask);
 void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
 			      bool add);
...
@@ -227,7 +227,7 @@ static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid,
 	else
 		port_mask = BIT(priv->emac_port);
 
-	ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
+	ret = cpsw_ale_vlan_del_modify(cpsw->ale, vid, port_mask);
 	if (ret != 0)
 		return ret;
...