Commit 703e5c99 authored by Sebastian Ott, committed by Martin Schwidefsky

[S390] cio: introduce consistent subchannel scanning

Previously, there were multiple subchannel scanning mechanisms which
could potentially conflict with each other. Fix this by moving
blacklist- and ccw-driver-triggered scanning to the existing
evaluation method.
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent b0a285d3
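For orientation: before this patch, css_schedule_reprobe() set need_reprobe and queued its own worker (reprobe_all), which rescanned subchannels with for_each_subchannel_staged() independently of the existing evaluation worklist (slow_subchannel_set / slow_path_work). After the patch, a reprobe request only marks every not-yet-registered subchannel in that one worklist via css_schedule_eval_all_unreg(), and the slow-path worker does all scanning. The stand-alone sketch below models just that shape; it is not the kernel code, and the toy ID space and the names registered[], slow_set[], schedule_reprobe() and evaluate_slow_path() are illustrative assumptions.

#include <stdio.h>
#include <stdbool.h>

#define NR_SUBCHANNELS 16               /* toy ID space, not the real ssid/schid range */

static bool registered[NR_SUBCHANNELS]; /* subchannels that already have a device */
static bool slow_set[NR_SUBCHANNELS];   /* the single evaluation worklist */

/* Rough analogue of css_schedule_eval_all_unreg(): queue all unregistered IDs. */
static void schedule_reprobe(void)
{
	for (int id = 0; id < NR_SUBCHANNELS; id++)
		if (!registered[id])
			slow_set[id] = true;
	/* the kernel then does queue_work(slow_path_wq, &slow_path_work) */
}

/* Rough analogue of the slow-path worker: drain the worklist and probe. */
static void evaluate_slow_path(void)
{
	for (int id = 0; id < NR_SUBCHANNELS; id++) {
		if (!slow_set[id])
			continue;
		slow_set[id] = false;
		registered[id] = true;  /* pretend css_probe_device() succeeded */
		printf("evaluated subchannel %d\n", id);
	}
}

int main(void)
{
	registered[3] = registered[7] = true; /* pretend these were found at boot */
	schedule_reprobe();                   /* e.g. after a blacklist change */
	evaluate_slow_path();
	return 0;
}

Because every trigger (blacklist change, ccw driver registration, the initial boot-time scan) now just adds IDs to the same set, concurrent requests merge into one pass instead of running competing scan loops.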
@@ -31,7 +31,6 @@
 #include "chp.h"
 
 int css_init_done = 0;
-static int need_reprobe = 0;
 int max_ssid;
 
 struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
@@ -315,12 +314,18 @@ int css_probe_device(struct subchannel_id schid)
 	int ret;
 	struct subchannel *sch;
 
-	sch = css_alloc_subchannel(schid);
-	if (IS_ERR(sch))
-		return PTR_ERR(sch);
+	if (cio_is_console(schid))
+		sch = cio_get_console_subchannel();
+	else {
+		sch = css_alloc_subchannel(schid);
+		if (IS_ERR(sch))
+			return PTR_ERR(sch);
+	}
 	ret = css_register_subchannel(sch);
-	if (ret)
-		put_device(&sch->dev);
+	if (ret) {
+		if (!cio_is_console(schid))
+			put_device(&sch->dev);
+	}
 	return ret;
 }
 
@@ -510,76 +515,48 @@ void css_schedule_eval_all(void)
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
-void css_wait_for_slow_path(void)
-{
-	flush_workqueue(slow_path_wq);
-}
-
-/* Reprobe subchannel if unregistered. */
-static int reprobe_subchannel(struct subchannel_id schid, void *data)
-{
-	int ret;
-
-	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
-		      schid.ssid, schid.sch_no);
-	if (need_reprobe)
-		return -EAGAIN;
-	ret = css_probe_device(schid);
-	switch (ret) {
-	case 0:
-		break;
-	case -ENXIO:
-	case -ENOMEM:
-	case -EIO:
-		/* These should abort looping */
-		break;
-	default:
-		ret = 0;
-	}
-
-	return ret;
-}
-
-static void reprobe_after_idle(struct work_struct *unused)
-{
-	/* Make sure initial subchannel scan is done. */
-	wait_event(ccw_device_init_wq,
-		   atomic_read(&ccw_device_init_count) == 0);
-	if (need_reprobe)
-		css_schedule_reprobe();
-}
-
-static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);
-
-/* Work function used to reprobe all unregistered subchannels. */
-static void reprobe_all(struct work_struct *unused)
-{
-	int ret;
-
-	CIO_MSG_EVENT(4, "reprobe start\n");
-	/* Make sure initial subchannel scan is done. */
-	if (atomic_read(&ccw_device_init_count) != 0) {
-		queue_work(ccw_device_work, &reprobe_idle_work);
-		return;
-	}
-	need_reprobe = 0;
-	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
-
-	CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
-		      need_reprobe);
-}
-
-static DECLARE_WORK(css_reprobe_work, reprobe_all);
+static int __unset_registered(struct device *dev, void *data)
+{
+	struct idset *set = data;
+	struct subchannel *sch = to_subchannel(dev);
+
+	idset_sch_del(set, sch->schid);
+	return 0;
+}
+
+void css_schedule_eval_all_unreg(void)
+{
+	unsigned long flags;
+	struct idset *unreg_set;
+
+	/* Find unregistered subchannels. */
+	unreg_set = idset_sch_new();
+	if (!unreg_set) {
+		/* Fallback. */
+		css_schedule_eval_all();
+		return;
+	}
+	idset_fill(unreg_set);
+	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+	/* Apply to slow_subchannel_set. */
+	spin_lock_irqsave(&slow_subchannel_lock, flags);
+	idset_add_set(slow_subchannel_set, unreg_set);
+	atomic_set(&css_eval_scheduled, 1);
+	queue_work(slow_path_wq, &slow_path_work);
+	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+	idset_free(unreg_set);
+}
+
+void css_wait_for_slow_path(void)
+{
+	flush_workqueue(slow_path_wq);
+}
 
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
 {
-	need_reprobe = 1;
-	queue_work(slow_path_wq, &css_reprobe_work);
+	css_schedule_eval_all_unreg();
 }
-
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
 /*
@@ -615,48 +592,6 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 	css_evaluate_subchannel(mchk_schid, 0);
 }
 
-static int __init setup_subchannel(struct subchannel_id schid, void *data)
-{
-	struct subchannel *sch;
-	int ret;
-
-	if (cio_is_console(schid))
-		sch = cio_get_console_subchannel();
-	else {
-		sch = css_alloc_subchannel(schid);
-		if (IS_ERR(sch))
-			ret = PTR_ERR(sch);
-		else
-			ret = 0;
-		switch (ret) {
-		case 0:
-			break;
-		case -ENOMEM:
-			panic("Out of memory in init_channel_subsystem\n");
-		/* -ENXIO: no more subchannels. */
-		case -ENXIO:
-			return ret;
-		/* -EIO: this subchannel set not supported. */
-		case -EIO:
-			return ret;
-		default:
-			return 0;
-		}
-	}
-	/*
-	 * We register ALL valid subchannels in ioinfo, even those
-	 * that have been present before init_channel_subsystem.
-	 * These subchannels can't have been registered yet (kmalloc
-	 * not working) so we do it now. This is true e.g. for the
-	 * console subchannel.
-	 */
-	if (css_register_subchannel(sch)) {
-		if (!cio_is_console(schid))
-			put_device(&sch->dev);
-	}
-	return 0;
-}
-
 static void __init
 css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 {
@@ -1028,11 +963,10 @@ static int css_settle(struct device_driver *drv, void *unused)
  */
 static int __init channel_subsystem_init_sync(void)
 {
-	/* Allocate and register subchannels. */
-	for_each_subchannel(setup_subchannel, NULL);
+	/* Start initial subchannel evaluation. */
+	css_schedule_eval_all();
 	/* Wait for the evaluation of subchannels to finish. */
 	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
 	/* Wait for the subchannel type specific initialization to finish */
 	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
 }
...
@@ -120,3 +120,13 @@ int idset_is_empty(struct idset *set)
 		return 1;
 	return 0;
 }
+
+void idset_add_set(struct idset *to, struct idset *from)
+{
+	unsigned long i, len;
+
+	len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
+		  __BITOPS_WORDS(from->num_ssid * from->num_id));
+	for (i = 0; i < len ; i++)
+		to->bitmap[i] |= from->bitmap[i];
+}
@@ -22,5 +22,6 @@ void idset_sch_del(struct idset *set, struct subchannel_id id);
 int idset_sch_contains(struct idset *set, struct subchannel_id id);
 int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
 int idset_is_empty(struct idset *set);
+void idset_add_set(struct idset *to, struct idset *from);
 
 #endif /* S390_IDSET_H */
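The new idset_add_set() helper that the worklist merge relies on is a plain word-wise OR that clamps to the smaller of the two bitmaps, so merging sets of different dimensions cannot overrun either buffer. A minimal stand-alone illustration of that union follows; the toy_set type and the sizes used here are invented for the example and are not the kernel's struct idset.

#include <stdio.h>

#define WORDS 2

/* Toy stand-in for struct idset: just a fixed-size bitmap. */
struct toy_set {
	unsigned long bitmap[WORDS];
};

/* Same shape as idset_add_set(): OR 'from' into 'to', word by word. */
static void toy_add_set(struct toy_set *to, const struct toy_set *from)
{
	for (unsigned long i = 0; i < WORDS; i++)
		to->bitmap[i] |= from->bitmap[i];
}

int main(void)
{
	struct toy_set slow  = { { 0x1UL, 0x0UL } };  /* IDs already pending evaluation */
	struct toy_set unreg = { { 0x6UL, 0x8UL } };  /* newly found unregistered IDs */

	toy_add_set(&slow, &unreg);
	printf("%#lx %#lx\n", slow.bitmap[0], slow.bitmap[1]); /* prints 0x7 0x8 */
	return 0;
}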