Commit 84a3f4db authored by Sebastian Andrzej Siewior, committed by Thomas Gleixner

net/mvneta: Convert to hotplug state machine

Install the callbacks via the state machine and let the core invoke
the callbacks on the already online CPUs.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: netdev@vger.kernel.org
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160818125731.27256-9-bigeasy@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent c4544dbc
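
For readers unfamiliar with the multi-instance hotplug API this conversion adopts, here is a minimal sketch of the pattern outside the mvneta context. All "example_" names are hypothetical and not taken from this commit; only the cpuhp_*() calls are the real kernel API.

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/module.h>

/* One instance per device; the hotplug core walks these on CPU up/down. */
struct example_priv {
	struct hlist_node node_online;
};

static enum cpuhp_state example_online_state;
static struct example_priv example_dev;

static int example_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct example_priv *priv = hlist_entry_safe(node, struct example_priv,
						     node_online);
	/* Per-device bring-up work for @cpu goes here. */
	(void)priv;
	return 0;
}

static int example_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	/* Per-device teardown work before @cpu goes offline. */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/* Register the callbacks once; a dynamic state number is returned. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "example:online",
				      example_cpu_online,
				      example_cpu_down_prep);
	if (ret < 0)
		return ret;
	example_online_state = ret;

	/* Attach one instance; _nocalls skips invoking the callback now. */
	ret = cpuhp_state_add_instance_nocalls(example_online_state,
					       &example_dev.node_online);
	if (ret)
		cpuhp_remove_multi_state(example_online_state);
	return ret;
}

static void __exit example_exit(void)
{
	cpuhp_state_remove_instance_nocalls(example_online_state,
					    &example_dev.node_online);
	cpuhp_remove_multi_state(example_online_state);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The commit below applies exactly this pattern to mvneta, with one dynamic "online" state and one statically allocated CPUHP_NET_MVNETA_DEAD state.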
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -382,7 +382,8 @@ struct mvneta_port {
 	struct mvneta_rx_queue *rxqs;
 	struct mvneta_tx_queue *txqs;
 	struct net_device *dev;
-	struct notifier_block cpu_notifier;
+	struct hlist_node node_online;
+	struct hlist_node node_dead;
 	int rxq_def;
 	/* Protect the access to the percpu interrupt registers,
 	 * ensuring that the configuration remains coherent.
@@ -574,6 +575,7 @@ struct mvneta_rx_queue {
 	int next_desc_to_proc;
 };
 
+static enum cpuhp_state online_hpstate;
 /* The hardware supports eight (8) rx queues, but we are only allowing
  * the first one to be used. Therefore, let's just allocate one queue.
  */
@@ -3311,101 +3313,104 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
 	}
 };
 
-static int mvneta_percpu_notifier(struct notifier_block *nfb,
-				  unsigned long action, void *hcpu)
-{
-	struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
-					      cpu_notifier);
-	int cpu = (unsigned long)hcpu, other_cpu;
-	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
-
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-		spin_lock(&pp->lock);
-		/* Configuring the driver for a new CPU while the
-		 * driver is stopping is racy, so just avoid it.
-		 */
-		if (pp->is_stopped) {
-			spin_unlock(&pp->lock);
-			break;
-		}
-		netif_tx_stop_all_queues(pp->dev);
-
-		/* We have to synchronise on tha napi of each CPU
-		 * except the one just being waked up
-		 */
-		for_each_online_cpu(other_cpu) {
-			if (other_cpu != cpu) {
-				struct mvneta_pcpu_port *other_port =
-					per_cpu_ptr(pp->ports, other_cpu);
-
-				napi_synchronize(&other_port->napi);
-			}
-		}
-
-		/* Mask all ethernet port interrupts */
-		on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
-		napi_enable(&port->napi);
-
-		/* Enable per-CPU interrupts on the CPU that is
-		 * brought up.
-		 */
-		mvneta_percpu_enable(pp);
-
-		/* Enable per-CPU interrupt on the one CPU we care
-		 * about.
-		 */
-		mvneta_percpu_elect(pp);
-
-		/* Unmask all ethernet port interrupts */
-		on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
-		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
-			    MVNETA_CAUSE_PHY_STATUS_CHANGE |
-			    MVNETA_CAUSE_LINK_CHANGE |
-			    MVNETA_CAUSE_PSC_SYNC_CHANGE);
-		netif_tx_start_all_queues(pp->dev);
-		spin_unlock(&pp->lock);
-		break;
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		netif_tx_stop_all_queues(pp->dev);
-		/* Thanks to this lock we are sure that any pending
-		 * cpu election is done
-		 */
-		spin_lock(&pp->lock);
-		/* Mask all ethernet port interrupts */
-		on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
-		spin_unlock(&pp->lock);
-
-		napi_synchronize(&port->napi);
-		napi_disable(&port->napi);
-		/* Disable per-CPU interrupts on the CPU that is
-		 * brought down.
-		 */
-		mvneta_percpu_disable(pp);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		/* Check if a new CPU must be elected now this on is down */
-		spin_lock(&pp->lock);
-		mvneta_percpu_elect(pp);
-		spin_unlock(&pp->lock);
-		/* Unmask all ethernet port interrupts */
-		on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
-		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
-			    MVNETA_CAUSE_PHY_STATUS_CHANGE |
-			    MVNETA_CAUSE_LINK_CHANGE |
-			    MVNETA_CAUSE_PSC_SYNC_CHANGE);
-		netif_tx_start_all_queues(pp->dev);
-		break;
-	}
-
-	return NOTIFY_OK;
+static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+	int other_cpu;
+	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+						  node_online);
+	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+	spin_lock(&pp->lock);
+	/*
+	 * Configuring the driver for a new CPU while the driver is
+	 * stopping is racy, so just avoid it.
+	 */
+	if (pp->is_stopped) {
+		spin_unlock(&pp->lock);
+		return 0;
+	}
+	netif_tx_stop_all_queues(pp->dev);
+
+	/*
+	 * We have to synchronise on tha napi of each CPU except the one
+	 * just being woken up
+	 */
+	for_each_online_cpu(other_cpu) {
+		if (other_cpu != cpu) {
+			struct mvneta_pcpu_port *other_port =
+				per_cpu_ptr(pp->ports, other_cpu);
+
+			napi_synchronize(&other_port->napi);
+		}
+	}
+
+	/* Mask all ethernet port interrupts */
+	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+	napi_enable(&port->napi);
+
+	/*
+	 * Enable per-CPU interrupts on the CPU that is
+	 * brought up.
+	 */
+	mvneta_percpu_enable(pp);
+
+	/*
+	 * Enable per-CPU interrupt on the one CPU we care
+	 * about.
+	 */
+	mvneta_percpu_elect(pp);
+
+	/* Unmask all ethernet port interrupts */
+	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
+		    MVNETA_CAUSE_LINK_CHANGE |
+		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
+	netif_tx_start_all_queues(pp->dev);
+	spin_unlock(&pp->lock);
+	return 0;
+}
+
+static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
+{
+	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+						  node_online);
+	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+	/*
+	 * Thanks to this lock we are sure that any pending cpu election is
+	 * done.
+	 */
+	spin_lock(&pp->lock);
+	/* Mask all ethernet port interrupts */
+	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+	spin_unlock(&pp->lock);
+
+	napi_synchronize(&port->napi);
+	napi_disable(&port->napi);
+	/* Disable per-CPU interrupts on the CPU that is brought down. */
+	mvneta_percpu_disable(pp);
+	return 0;
+}
+
+static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
+						  node_dead);
+
+	/* Check if a new CPU must be elected now this on is down */
+	spin_lock(&pp->lock);
+	mvneta_percpu_elect(pp);
+	spin_unlock(&pp->lock);
+	/* Unmask all ethernet port interrupts */
+	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
+		    MVNETA_CAUSE_LINK_CHANGE |
+		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
+	netif_tx_start_all_queues(pp->dev);
+	return 0;
 }
 
 static int mvneta_open(struct net_device *dev)
@@ -3442,7 +3447,15 @@ static int mvneta_open(struct net_device *dev)
 	/* Register a CPU notifier to handle the case where our CPU
 	 * might be taken offline.
 	 */
-	register_cpu_notifier(&pp->cpu_notifier);
+	ret = cpuhp_state_add_instance_nocalls(online_hpstate,
+					       &pp->node_online);
+	if (ret)
+		goto err_free_irq;
+
+	ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+					       &pp->node_dead);
+	if (ret)
+		goto err_free_online_hp;
 
 	/* In default link is down */
 	netif_carrier_off(pp->dev);
@@ -3450,15 +3463,19 @@ static int mvneta_open(struct net_device *dev)
 	ret = mvneta_mdio_probe(pp);
 	if (ret < 0) {
 		netdev_err(dev, "cannot probe MDIO bus\n");
-		goto err_free_irq;
+		goto err_free_dead_hp;
 	}
 
 	mvneta_start_dev(pp);
 
 	return 0;
 
+err_free_dead_hp:
+	cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+					    &pp->node_dead);
+err_free_online_hp:
+	cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
 err_free_irq:
-	unregister_cpu_notifier(&pp->cpu_notifier);
 	on_each_cpu(mvneta_percpu_disable, pp, true);
 	free_percpu_irq(pp->dev->irq, pp->ports);
 err_cleanup_txqs:
@@ -3484,7 +3501,10 @@ static int mvneta_stop(struct net_device *dev)
 
 	mvneta_stop_dev(pp);
 	mvneta_mdio_remove(pp);
-	unregister_cpu_notifier(&pp->cpu_notifier);
+	cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
+	cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+					    &pp->node_dead);
 	on_each_cpu(mvneta_percpu_disable, pp, true);
 	free_percpu_irq(dev->irq, pp->ports);
 	mvneta_cleanup_rxqs(pp);
@@ -4024,7 +4044,6 @@ static int mvneta_probe(struct platform_device *pdev)
 	err = of_property_read_string(dn, "managed", &managed);
 	pp->use_inband_status = (err == 0 &&
 				 strcmp(managed, "in-band-status") == 0);
-	pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
 
 	pp->rxq_def = rxq_def;
@@ -4227,7 +4246,42 @@ static struct platform_driver mvneta_driver = {
 	},
 };
 
-module_platform_driver(mvneta_driver);
+static int __init mvneta_driver_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvmeta:online",
+				      mvneta_cpu_online,
+				      mvneta_cpu_down_prepare);
+	if (ret < 0)
+		goto out;
+	online_hpstate = ret;
+	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
+				      NULL, mvneta_cpu_dead);
+	if (ret)
+		goto err_dead;
+
+	ret = platform_driver_register(&mvneta_driver);
+	if (ret)
+		goto err;
+	return 0;
+
+err:
+	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
+err_dead:
+	cpuhp_remove_multi_state(online_hpstate);
+out:
+	return ret;
+}
+module_init(mvneta_driver_init);
+
+static void __exit mvneta_driver_exit(void)
+{
+	platform_driver_unregister(&mvneta_driver);
+	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
+	cpuhp_remove_multi_state(online_hpstate);
+}
+module_exit(mvneta_driver_exit);
 
 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -18,6 +18,7 @@ enum cpuhp_state {
 	CPUHP_SLUB_DEAD,
 	CPUHP_MM_WRITEBACK_DEAD,
 	CPUHP_SOFTIRQ_DEAD,
+	CPUHP_NET_MVNETA_DEAD,
 	CPUHP_WORKQUEUE_PREP,
 	CPUHP_POWER_NUMA_PREPARE,
 	CPUHP_HRTIMERS_PREPARE,