Commit 685df9c3 authored by Jakub Kicinski

Merge branch 'net-dsa-turn-arrays-of-ports-into-a-list'

Vivien Didelot says:

====================
The dsa_switch structure represents the physical switch device itself,
and is allocated by the driver. The dsa_switch_tree and dsa_port structures
represent the logical switch fabric (possibly composed of multiple switch
devices) and its ports, and are allocated by the DSA core.

This branch lists the logical ports directly in the fabric, which simplifies
iterating over all ports when assigning the default CPU port or configuring
the D in DSA in drivers like mv88e6xxx (a short sketch of iterating this list
follows the commit message below).

This also removes the unique dst->cpu_dp pointer and is a first step towards
supporting multiple CPU ports and dropping the DSA_MAX_PORTS limitation.

Because the dsa_port structures are no longer tied to the dsa_switch
structure, we do not need to provide a helper for the drivers to allocate a
switch structure. As in many other subsystems, drivers can now embed their
dsa_switch structure into their private structure however they wish (a sketch
of the new allocation pattern also follows below). This will be particularly
interesting for the Broadcom drivers, which until now were limited by the
dynamically allocated array of DSA ports.

The series implements the list of dsa_port structures, makes use of it,
then drops dst->cpu_dp and the dsa_switch_alloc helper.
====================
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
parents 88652bf8 7e99e347
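
Editor's sketch (not part of the commit): with every dsa_port of the fabric now
linked on dst->ports, code can walk all ports with list_for_each_entry() and
filter on dp->ds, dp->index or dp->type, as several hunks below do. The helper
name here is hypothetical; the list head and the dsa_port fields it uses are the
ones added or already present in this series.

```c
#include <linux/list.h>
#include <net/dsa.h>

/* Hypothetical helper: count the user ports of one switch by walking
 * the fabric-wide port list introduced by this series (dst->ports).
 */
static int example_count_user_ports(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;
	int count = 0;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds == ds && dp->type == DSA_PORT_TYPE_USER)
			count++;

	return count;
}
```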
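
A minimal sketch of the driver-side pattern once dsa_switch_alloc() is gone,
modelled on the probe hunks below; foo_priv, foo_register_switch and
FOO_NUM_PORTS are placeholder names. The driver allocates struct dsa_switch
itself, fills in dev, num_ports, priv and ops, then registers it.

```c
#include <linux/device.h>
#include <linux/slab.h>
#include <net/dsa.h>

#define FOO_NUM_PORTS	8	/* placeholder port count */

struct foo_priv {
	struct dsa_switch *ds;
	/* ... driver private state ... */
};

static int foo_register_switch(struct device *dev, struct foo_priv *priv,
			       const struct dsa_switch_ops *ops)
{
	/* What dsa_switch_alloc() used to do, now open-coded */
	priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	priv->ds->dev = dev;
	priv->ds->num_ports = FOO_NUM_PORTS;
	priv->ds->priv = priv;
	priv->ds->ops = ops;

	/* The dsa_port structures themselves are allocated by the DSA core */
	return dsa_register_switch(priv->ds);
}
```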
......@@ -524,7 +524,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
if (!dsa_is_user_port(ds, port))
return 0;
cpu_port = ds->ports[port].cpu_dp->index;
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
......@@ -1629,7 +1629,7 @@ EXPORT_SYMBOL(b53_fdb_dump);
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
s8 cpu_port = ds->ports[port].cpu_dp->index;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
u16 pvlan, reg;
unsigned int i;
......@@ -1675,7 +1675,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
struct b53_vlan *vl = &dev->vlans[0];
s8 cpu_port = ds->ports[port].cpu_dp->index;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
......@@ -2341,10 +2341,13 @@ struct b53_device *b53_switch_alloc(struct device *base,
struct dsa_switch *ds;
struct b53_device *dev;
ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
ds->dev = base;
ds->num_ports = DSA_MAX_PORTS;
dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
......
......@@ -662,7 +662,7 @@ static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
* state machine and make it go in PHY_FORCING state instead.
*/
if (!status->link)
netif_carrier_off(ds->ports[port].slave);
netif_carrier_off(dsa_to_port(ds, port)->slave);
status->duplex = DUPLEX_FULL;
} else {
status->link = true;
......@@ -728,7 +728,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
struct net_device *p = ds->ports[port].cpu_dp->master;
struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol = { };
......@@ -752,9 +752,9 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
struct net_device *p = ds->ports[port].cpu_dp->master;
struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->ports[port].cpu_dp->index;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct ethtool_wolinfo pwol = { };
if (p->ethtool_ops->get_wol)
......
......@@ -821,7 +821,7 @@ static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
struct ethtool_rx_flow_spec *fs)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->ports[port].cpu_dp->index;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
__u64 ring_cookie = fs->ring_cookie;
unsigned int queue_num, port_num;
int ret;
......@@ -1049,7 +1049,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
struct net_device *p = ds->ports[port].cpu_dp->master;
struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
......@@ -1092,7 +1092,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
struct net_device *p = ds->ports[port].cpu_dp->master;
struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
......
......@@ -286,10 +286,13 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
dev_info(&mdiodev->dev, "%s: 0x%0x\n",
pdata->name, pdata->enabled_ports);
ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
ds->dev = &mdiodev->dev;
ds->num_ports = DSA_MAX_PORTS;
ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL);
if (!ps)
return -ENOMEM;
......
......@@ -1283,10 +1283,12 @@ static int lan9303_register_switch(struct lan9303 *chip)
{
int base;
chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS);
chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
if (!chip->ds)
return -ENOMEM;
chip->ds->dev = chip->dev;
chip->ds->num_ports = LAN9303_NUM_PORTS;
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
base = chip->phy_addr_base;
......
......@@ -1854,10 +1854,12 @@ static int gswip_probe(struct platform_device *pdev)
if (!priv->hw_info)
return -EINVAL;
priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports);
priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
priv->ds->dev = dev;
priv->ds->num_ports = priv->hw_info->max_ports;
priv->ds->priv = priv;
priv->ds->ops = &gswip_switch_ops;
priv->dev = dev;
......
......@@ -398,10 +398,13 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
struct dsa_switch *ds;
struct ksz_device *swdev;
ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
if (!ds)
return NULL;
ds->dev = base;
ds->num_ports = DSA_MAX_PORTS;
swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
if (!swdev)
return NULL;
......
......@@ -862,7 +862,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
for (i = 0; i < MT7530_NUM_PORTS; i++) {
if (dsa_is_user_port(ds, i) &&
dsa_port_is_vlan_filtering(&ds->ports[i])) {
dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
all_user_ports_removed = false;
break;
}
......@@ -922,7 +922,7 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
* other port is still a VLAN-aware port.
*/
if (dsa_is_user_port(ds, i) && i != port &&
!dsa_port_is_vlan_filtering(&ds->ports[i])) {
!dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
......@@ -1165,7 +1165,7 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port,
/* The port is kept as VLAN-unaware if bridge with vlan_filtering not
* being set.
*/
if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
return;
mutex_lock(&priv->reg_mutex);
......@@ -1196,7 +1196,7 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
/* The port is kept as VLAN-unaware if bridge with vlan_filtering not
* being set.
*/
if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
return 0;
mutex_lock(&priv->reg_mutex);
......@@ -1252,7 +1252,7 @@ mt7530_setup(struct dsa_switch *ds)
* controller also is the container for two GMACs nodes representing
* as two netdev instances.
*/
dn = ds->ports[MT7530_CPU_PORT].master->dev.of_node->parent;
dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
if (priv->id == ID_MT7530) {
priv->ethernet = syscon_node_to_regmap(dn);
......@@ -1340,7 +1340,7 @@ mt7530_setup(struct dsa_switch *ds)
if (!dsa_is_unused_port(ds, 5)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
interface = of_get_phy_mode(ds->ports[5].dn);
interface = of_get_phy_mode(dsa_to_port(ds, 5)->dn);
} else {
/* Scan the ethernet nodes. look for GMAC1, lookup used phy */
for_each_child_of_node(dn, mac_np) {
......@@ -1632,10 +1632,13 @@ mt7530_probe(struct mdio_device *mdiodev)
if (!priv)
return -ENOMEM;
priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = DSA_MAX_PORTS;
/* Use medatek,mcm property to distinguish hardware type that would
* casues a little bit differences on power-on sequence.
*/
......
......@@ -270,10 +270,12 @@ static int mv88e6060_probe(struct mdio_device *mdiodev)
dev_info(dev, "switch %s detected\n", name);
ds = dsa_switch_alloc(dev, MV88E6060_PORTS);
ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
ds->dev = dev;
ds->num_ports = MV88E6060_PORTS;
ds->priv = priv;
ds->dev = dev;
ds->ops = &mv88e6060_switch_ops;
......
......@@ -1057,35 +1057,43 @@ static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
return 0;
}
/* Mask of the local ports allowed to receive frames from a given fabric port */
static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
{
struct dsa_switch *ds = NULL;
struct dsa_switch *ds = chip->ds;
struct dsa_switch_tree *dst = ds->dst;
struct net_device *br;
struct dsa_port *dp;
bool found = false;
u16 pvlan;
int i;
if (dev < DSA_MAX_SWITCHES)
ds = chip->ds->dst->ds[dev];
list_for_each_entry(dp, &dst->ports, list) {
if (dp->ds->index == dev && dp->index == port) {
found = true;
break;
}
}
/* Prevent frames from unknown switch or port */
if (!ds || port >= ds->num_ports)
if (!found)
return 0;
/* Frames from DSA links and CPU ports can egress any local port */
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
if (dp->type == DSA_PORT_TYPE_CPU || dp->type == DSA_PORT_TYPE_DSA)
return mv88e6xxx_port_mask(chip);
br = ds->ports[port].bridge_dev;
br = dp->bridge_dev;
pvlan = 0;
/* Frames from user ports can egress any local DSA links and CPU ports,
* as well as any local member of their bridge group.
*/
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
if (dsa_is_cpu_port(chip->ds, i) ||
dsa_is_dsa_port(chip->ds, i) ||
(br && dsa_to_port(chip->ds, i)->bridge_dev == br))
pvlan |= BIT(i);
list_for_each_entry(dp, &dst->ports, list)
if (dp->ds == ds &&
(dp->type == DSA_PORT_TYPE_CPU ||
dp->type == DSA_PORT_TYPE_DSA ||
(br && dp->bridge_dev == br)))
pvlan |= BIT(dp->index);
return pvlan;
}
......@@ -1253,7 +1261,7 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
u16 pvlan = 0;
if (!mv88e6xxx_has_pvt(chip))
return -EOPNOTSUPP;
return 0;
/* Skip the local source device, which uses in-chip port VLAN */
if (dev != chip->ds->index)
......@@ -1402,7 +1410,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
if (!ds->ports[i].slave)
if (!dsa_to_port(ds, i)->slave)
continue;
if (vlan.member[i] ==
......@@ -1410,7 +1418,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
continue;
if (dsa_to_port(ds, i)->bridge_dev ==
ds->ports[port].bridge_dev)
dsa_to_port(ds, port)->bridge_dev)
break; /* same bridge, check next VLAN */
if (!dsa_to_port(ds, i)->bridge_dev)
......@@ -2035,32 +2043,26 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
struct net_device *br)
{
struct dsa_switch *ds;
int port;
int dev;
struct dsa_switch *ds = chip->ds;
struct dsa_switch_tree *dst = ds->dst;
struct dsa_port *dp;
int err;
/* Remap the Port VLAN of each local bridge group member */
for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) {
if (chip->ds->ports[port].bridge_dev == br) {
err = mv88e6xxx_port_vlan_map(chip, port);
if (err)
return err;
}
}
if (!mv88e6xxx_has_pvt(chip))
return 0;
/* Remap the Port VLAN of each cross-chip bridge group member */
for (dev = 0; dev < DSA_MAX_SWITCHES; ++dev) {
ds = chip->ds->dst->ds[dev];
if (!ds)
break;
for (port = 0; port < ds->num_ports; ++port) {
if (ds->ports[port].bridge_dev == br) {
err = mv88e6xxx_pvt_map(chip, dev, port);
list_for_each_entry(dp, &dst->ports, list) {
if (dp->bridge_dev == br) {
if (dp->ds == ds) {
/* This is a local bridge group member,
* remap its Port VLAN Map.
*/
err = mv88e6xxx_port_vlan_map(chip, dp->index);
if (err)
return err;
} else {
/* This is an external bridge group member,
* remap its cross-chip Port VLAN Table entry.
*/
err = mv88e6xxx_pvt_map(chip, dp->ds->index,
dp->index);
if (err)
return err;
}
......@@ -2101,9 +2103,6 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
if (!mv88e6xxx_has_pvt(chip))
return 0;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_pvt_map(chip, dev, port);
mv88e6xxx_reg_unlock(chip);
......@@ -2116,9 +2115,6 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int dev,
{
struct mv88e6xxx_chip *chip = ds->priv;
if (!mv88e6xxx_has_pvt(chip))
return;
mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_pvt_map(chip, dev, port))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
......@@ -4982,10 +4978,12 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
struct device *dev = chip->dev;
struct dsa_switch *ds;
ds = dsa_switch_alloc(dev, mv88e6xxx_num_ports(chip));
ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
ds->dev = dev;
ds->num_ports = mv88e6xxx_num_ports(chip);
ds->priv = chip;
ds->dev = dev;
ds->ops = &mv88e6xxx_switch_ops;
......
......@@ -661,7 +661,7 @@ qca8k_setup(struct dsa_switch *ds)
return ret;
/* Initialize CPU port pad mode (xMII type, delays...) */
phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
phy_mode = of_get_phy_mode(dsa_to_port(ds, QCA8K_CPU_PORT)->dn);
if (phy_mode < 0) {
pr_err("Can't find phy-mode for master device\n");
return phy_mode;
......@@ -1077,10 +1077,13 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (id != QCA8K_ID_QCA8337)
return -ENODEV;
priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds),
QCA8K_NUM_PORTS);
if (!priv->ds)
return -ENOMEM;
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = DSA_MAX_PORTS;
priv->ds->priv = priv;
priv->ops = qca8k_switch_ops;
priv->ds->ops = &priv->ops;
......
......@@ -444,9 +444,12 @@ static int realtek_smi_probe(struct platform_device *pdev)
return ret;
}
smi->ds = dsa_switch_alloc(dev, smi->num_ports);
smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL);
if (!smi->ds)
return -ENOMEM;
smi->ds->dev = dev;
smi->ds->num_ports = smi->num_ports;
smi->ds->priv = smi;
smi->ds->ops = var->ds_ops;
......
......@@ -1058,7 +1058,7 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
......@@ -1121,7 +1121,7 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) {
l2_lookup.mask_vlanid = VLAN_VID_MASK;
l2_lookup.mask_iotag = BIT(0);
} else {
......@@ -1167,7 +1167,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
* for what gets printed in 'bridge fdb show'. In the case of zero,
* no VID gets printed at all.
*/
if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
vid = 0;
return priv->info->fdb_add_cmd(ds, port, addr, vid);
......@@ -1178,7 +1178,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
vid = 0;
return priv->info->fdb_del_cmd(ds, port, addr, vid);
......@@ -1217,7 +1217,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
l2_lookup.vlanid = 0;
cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
}
......@@ -1704,7 +1704,7 @@ static int sja1105_port_enable(struct dsa_switch *ds, int port,
if (!dsa_is_user_port(ds, port))
return 0;
slave = ds->ports[port].slave;
slave = dsa_to_port(ds, port)->slave;
slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
......@@ -1736,7 +1736,7 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
}
/* Transfer skb to the host port. */
dsa_enqueue_skb(skb, ds->ports[port].slave);
dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
/* Wait until the switch has processed the frame */
do {
......@@ -2047,30 +2047,37 @@ static int sja1105_probe(struct spi_device *spi)
dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
if (!ds)
return -ENOMEM;
ds->dev = dev;
ds->num_ports = SJA1105_NUM_PORTS;
ds->ops = &sja1105_switch_ops;
ds->priv = priv;
priv->ds = ds;
tagger_data = &priv->tagger_data;
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->mgmt_lock);
sja1105_tas_setup(ds);
rc = dsa_register_switch(priv->ds);
if (rc)
return rc;
/* Connections between dsa_port and sja1105_port */
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
struct sja1105_port *sp = &priv->ports[i];
ds->ports[i].priv = sp;
sp->dp = &ds->ports[i];
dsa_to_port(ds, i)->priv = sp;
sp->dp = dsa_to_port(ds, i);
sp->data = tagger_data;
}
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->mgmt_lock);
sja1105_tas_setup(ds);
return dsa_register_switch(priv->ds);
return 0;
}
static int sja1105_remove(struct spi_device *spi)
......
......@@ -1178,9 +1178,12 @@ int vsc73xx_probe(struct vsc73xx *vsc)
* We allocate 8 ports and avoid access to the nonexistant
* ports.
*/
vsc->ds = dsa_switch_alloc(dev, 8);
vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL);
if (!vsc->ds)
return -ENOMEM;
vsc->ds->dev = dev;
vsc->ds->num_ports = 8;
vsc->ds->priv = vsc;
vsc->ds->ops = &vsc73xx_ds_ops;
......
......@@ -120,10 +120,8 @@ struct dsa_switch_tree {
*/
struct dsa_platform_data *pd;
/*
* The switch port to which the CPU is attached.
*/
struct dsa_port *cpu_dp;
/* List of switch ports */
struct list_head ports;
/*
* Data for the individual switch chips.
......@@ -195,6 +193,8 @@ struct dsa_port {
struct work_struct xmit_work;
struct sk_buff_head xmit_queue;
struct list_head list;
/*
* Give the switch driver somewhere to hang its per-port private data
* structures (accessible from the tagger).
......@@ -210,9 +210,13 @@ struct dsa_port {
* Original copy of the master netdev net_device_ops
*/
const struct net_device_ops *orig_ndo_ops;
bool setup;
};
struct dsa_switch {
bool setup;
struct device *dev;
/*
......@@ -273,14 +277,19 @@ struct dsa_switch {
*/
bool vlan_filtering;
/* Dynamically allocated ports, keep last */
size_t num_ports;
struct dsa_port ports[];
};
static inline const struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
{
return &ds->ports[p];
struct dsa_switch_tree *dst = ds->dst;
struct dsa_port *dp = NULL;
list_for_each_entry(dp, &dst->ports, list)
if (dp->ds == ds && dp->index == p)
break;
return dp;
}
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
......@@ -568,7 +577,6 @@ static inline bool dsa_can_decode(const struct sk_buff *skb,
return false;
}
struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n);
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
#ifdef CONFIG_PM_SLEEP
......
......@@ -246,7 +246,9 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
#ifdef CONFIG_PM_SLEEP
static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
return dsa_is_user_port(ds, p) && ds->ports[p].slave;
const struct dsa_port *dp = dsa_to_port(ds, p);
return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}
int dsa_switch_suspend(struct dsa_switch *ds)
......@@ -258,7 +260,7 @@ int dsa_switch_suspend(struct dsa_switch *ds)
if (!dsa_is_port_initialized(ds, i))
continue;
ret = dsa_slave_suspend(ds->ports[i].slave);
ret = dsa_slave_suspend(dsa_to_port(ds, i)->slave);
if (ret)
return ret;
}
......@@ -285,7 +287,7 @@ int dsa_switch_resume(struct dsa_switch *ds)
if (!dsa_is_port_initialized(ds, i))
continue;
ret = dsa_slave_resume(ds->ports[i].slave);
ret = dsa_slave_resume(dsa_to_port(ds, i)->slave);
if (ret)
return ret;
}
......
[Diff for one file is collapsed and not shown.]
......@@ -104,25 +104,14 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct dsa_switch *ds;
struct dsa_port *slave_port;
struct dsa_port *dp;
if (device < 0 || device >= DSA_MAX_SWITCHES)
return NULL;
list_for_each_entry(dp, &dst->ports, list)
if (dp->ds->index == device && dp->index == port &&
dp->type == DSA_PORT_TYPE_USER)
return dp->slave;
ds = dst->ds[device];
if (!ds)
return NULL;
if (port < 0 || port >= ds->num_ports)
return NULL;
slave_port = &ds->ports[port];
if (unlikely(slave_port->type != DSA_PORT_TYPE_USER))
return NULL;
return slave_port->slave;
return NULL;
}
/* port.c */
......
......@@ -20,7 +20,7 @@ static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
int i;
for (i = 0; i < ds->num_ports; ++i) {
struct dsa_port *dp = &ds->ports[i];
struct dsa_port *dp = dsa_to_port(ds, i);
if (dp->ageing_time && dp->ageing_time < ageing_time)
ageing_time = dp->ageing_time;
......@@ -98,7 +98,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
if (unset_vlan_filtering) {
struct switchdev_trans trans = {0};
err = dsa_port_vlan_filtering(&ds->ports[info->port],
err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
false, &trans);
if (err && err != EOPNOTSUPP)
return err;
......
......@@ -103,7 +103,7 @@ static int dsa_8021q_restore_pvid(struct dsa_switch *ds, int port)
if (!dsa_is_user_port(ds, port))
return 0;
slave = ds->ports[port].slave;
slave = dsa_to_port(ds, port)->slave;
err = br_vlan_get_pvid(slave, &pvid);
if (err < 0)
......@@ -118,7 +118,7 @@ static int dsa_8021q_restore_pvid(struct dsa_switch *ds, int port)
return err;
}
return dsa_port_vid_add(&ds->ports[port], pvid, vinfo.flags);
return dsa_port_vid_add(dsa_to_port(ds, port), pvid, vinfo.flags);
}
/* If @enabled is true, installs @vid with @flags into the switch port's HW
......@@ -130,7 +130,7 @@ static int dsa_8021q_restore_pvid(struct dsa_switch *ds, int port)
static int dsa_8021q_vid_apply(struct dsa_switch *ds, int port, u16 vid,
u16 flags, bool enabled)
{
struct dsa_port *dp = &ds->ports[port];
struct dsa_port *dp = dsa_to_port(ds, port);
struct bridge_vlan_info vinfo;
int err;
......