Commit a4d26c6a authored by Stefan Weinhuber, committed by Martin Schwidefsky

[S390] dasd: do path verification for paths added at runtime

When a new path is added at runtime, the CIO layer will call the driver's
path_event callback. The DASD device driver uses this callback to trigger
a path verification for the new path. The driver will only use those paths
for I/O that have been successfully verified.
Signed-off-by: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent ef19298b
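
In short, the diff below wires up the flow path_event -> tbvpm -> device tasklet -> discipline verify_path -> opm. The following is a minimal, userspace-runnable sketch of the path-mask bookkeeping only; the struct and field names (dasd_path, opm, tbvpm, ppm, npm) are taken from the diff, but the helper functions are illustrative stand-ins, not the kernel implementation:

/*
 * Illustrative model of the new path-mask bookkeeping (not kernel code):
 * a path reported as available is first recorded in the "to be verified"
 * mask (tbvpm) and only promoted to the operational mask (opm) after the
 * discipline's verify_path step succeeds.
 */
#include <stdio.h>

typedef unsigned char u8;

struct dasd_path {
        u8 opm;   /* operational path mask */
        u8 tbvpm; /* paths still to be verified */
        u8 ppm;   /* preferred path mask */
        u8 npm;   /* non-preferred path mask */
};

/* A new path on channel path 'chp' appeared: only schedule verification. */
static void path_available(struct dasd_path *p, int chp)
{
        p->tbvpm |= 0x80 >> chp;
}

/* Verification of the paths in 'lpm' succeeded: make them operational. */
static void verify_done(struct dasd_path *p, u8 lpm)
{
        p->opm |= lpm;
        p->tbvpm &= ~lpm;
}

int main(void)
{
        struct dasd_path p = { .opm = 0x80 };  /* path 0 already operational */

        path_available(&p, 1);                 /* new path shows up at runtime */
        printf("opm=%02x tbvpm=%02x\n", p.opm, p.tbvpm);  /* opm=80 tbvpm=40 */

        verify_done(&p, p.tbvpm);              /* driver verified the new path */
        printf("opm=%02x tbvpm=%02x\n", p.opm, p.tbvpm);  /* opm=c0 tbvpm=00 */
        return 0;
}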
@@ -913,6 +913,11 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
         cqr->startclk = get_clock();
         cqr->starttime = jiffies;
         cqr->retries--;
+        if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+                cqr->lpm &= device->path_data.opm;
+                if (!cqr->lpm)
+                        cqr->lpm = device->path_data.opm;
+        }
         if (cqr->cpmode == 1) {
                 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
                                          (long) cqr, cqr->lpm);
@@ -925,35 +930,53 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
                 cqr->status = DASD_CQR_IN_IO;
                 break;
         case -EBUSY:
-                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                               "start_IO: device busy, retry later");
                 break;
         case -ETIMEDOUT:
-                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                               "start_IO: request timeout, retry later");
                 break;
         case -EACCES:
-                /* -EACCES indicates that the request used only a
-                 * subset of the available pathes and all these
-                 * pathes are gone.
-                 * Do a retry with all available pathes.
+                /* -EACCES indicates that the request used only a subset of the
+                 * available paths and all these paths are gone. If the lpm of
+                 * this request was only a subset of the opm (e.g. the ppm) then
+                 * we just do a retry with all available paths.
+                 * If we already use the full opm, something is amiss, and we
+                 * need a full path verification.
                  */
-                cqr->lpm = LPM_ANYPATH;
-                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
-                              "start_IO: selected pathes gone,"
-                              " retry on all pathes");
+                if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+                        DBF_DEV_EVENT(DBF_WARNING, device,
+                                      "start_IO: selected paths gone (%x)",
+                                      cqr->lpm);
+                } else if (cqr->lpm != device->path_data.opm) {
+                        cqr->lpm = device->path_data.opm;
+                        DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+                                      "start_IO: selected paths gone,"
+                                      " retry on all paths");
+                } else {
+                        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+                                      "start_IO: all paths in opm gone,"
+                                      " do path verification");
+                        dasd_generic_last_path_gone(device);
+                        device->path_data.opm = 0;
+                        device->path_data.ppm = 0;
+                        device->path_data.npm = 0;
+                        device->path_data.tbvpm =
+                                ccw_device_get_path_mask(device->cdev);
+                }
                 break;
         case -ENODEV:
-                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                               "start_IO: -ENODEV device gone, retry");
                 break;
         case -EIO:
-                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                               "start_IO: -EIO device gone, retry");
                 break;
         case -EINVAL:
                 /* most likely caused in power management context */
-                DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                               "start_IO: -EINVAL device currently "
                               "not accessible");
                 break;
@@ -1175,12 +1198,13 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
          */
         if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
             cqr->retries > 0) {
-                if (cqr->lpm == LPM_ANYPATH)
+                if (cqr->lpm == device->path_data.opm)
                         DBF_DEV_EVENT(DBF_DEBUG, device,
                                       "default ERP in fastpath "
                                       "(%i retries left)",
                                       cqr->retries);
-                cqr->lpm = LPM_ANYPATH;
+                if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+                        cqr->lpm = device->path_data.opm;
                 cqr->status = DASD_CQR_QUEUED;
                 next = cqr;
         } else
@@ -1364,8 +1388,14 @@ static void __dasd_device_start_head(struct dasd_device *device)
         cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
         if (cqr->status != DASD_CQR_QUEUED)
                 return;
-        /* when device is stopped, return request to previous layer */
-        if (device->stopped) {
+        /* when device is stopped, return request to previous layer
+         * exception: only the disconnect or unresumed bits are set and the
+         * cqr is a path verification request
+         */
+        if (device->stopped &&
+            !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+              && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
+                cqr->intrc = -EAGAIN;
                 cqr->status = DASD_CQR_CLEARED;
                 dasd_schedule_device_bh(device);
                 return;
@@ -1381,6 +1411,23 @@ static void __dasd_device_start_head(struct dasd_device *device)
                 dasd_device_set_timer(device, 50);
 }
 
+static void __dasd_device_check_path_events(struct dasd_device *device)
+{
+        int rc;
+
+        if (device->path_data.tbvpm) {
+                if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
+                                        DASD_UNRESUMED_PM))
+                        return;
+                rc = device->discipline->verify_path(
+                                device, device->path_data.tbvpm);
+                if (rc)
+                        dasd_device_set_timer(device, 50);
+                else
+                        device->path_data.tbvpm = 0;
+        }
+};
+
 /*
  * Go through all request on the dasd_device request queue,
  * terminate them on the cdev if necessary, and return them to the
@@ -1455,6 +1502,7 @@ static void dasd_device_tasklet(struct dasd_device *device)
         __dasd_device_check_expire(device);
         /* find final requests on ccw queue */
         __dasd_device_process_ccw_queue(device, &final_queue);
+        __dasd_device_check_path_events(device);
         spin_unlock_irq(get_ccwdev_lock(device->cdev));
         /* Now call the callback function of requests with final status */
         __dasd_device_process_final_queue(device, &final_queue);
@@ -2586,10 +2634,53 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
         return 0;
 }
 
+int dasd_generic_last_path_gone(struct dasd_device *device)
+{
+        struct dasd_ccw_req *cqr;
+
+        dev_warn(&device->cdev->dev, "No operational channel path is left "
+                 "for the device\n");
+        DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
+        /* First of all call extended error reporting. */
+        dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+
+        if (device->state < DASD_STATE_BASIC)
+                return 0;
+        /* Device is active. We want to keep it. */
+        list_for_each_entry(cqr, &device->ccw_queue, devlist)
+                if ((cqr->status == DASD_CQR_IN_IO) ||
+                    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
+                        cqr->status = DASD_CQR_QUEUED;
+                        cqr->retries++;
+                }
+        dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
+        dasd_device_clear_timer(device);
+        dasd_schedule_device_bh(device);
+        return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
+
+int dasd_generic_path_operational(struct dasd_device *device)
+{
+        dev_info(&device->cdev->dev, "A channel path to the device has become "
+                 "operational\n");
+        DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
+        dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
+        if (device->stopped & DASD_UNRESUMED_PM) {
+                dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
+                dasd_restore_device(device);
+                return 1;
+        }
+        dasd_schedule_device_bh(device);
+        if (device->block)
+                dasd_schedule_block_bh(device->block);
+        return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
+
 int dasd_generic_notify(struct ccw_device *cdev, int event)
 {
         struct dasd_device *device;
-        struct dasd_ccw_req *cqr;
         int ret;
 
         device = dasd_device_from_cdev_locked(cdev);
@@ -2600,41 +2691,64 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
         case CIO_GONE:
         case CIO_BOXED:
         case CIO_NO_PATH:
-                /* First of all call extended error reporting. */
-                dasd_eer_write(device, NULL, DASD_EER_NOPATH);
-
-                if (device->state < DASD_STATE_BASIC)
-                        break;
-                /* Device is active. We want to keep it. */
-                list_for_each_entry(cqr, &device->ccw_queue, devlist)
-                        if (cqr->status == DASD_CQR_IN_IO) {
-                                cqr->status = DASD_CQR_QUEUED;
-                                cqr->retries++;
-                        }
-                dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
-                dasd_device_clear_timer(device);
-                dasd_schedule_device_bh(device);
-                ret = 1;
+                device->path_data.opm = 0;
+                device->path_data.ppm = 0;
+                device->path_data.npm = 0;
+                ret = dasd_generic_last_path_gone(device);
                 break;
         case CIO_OPER:
-                /* FIXME: add a sanity check. */
-                dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
-                if (device->stopped & DASD_UNRESUMED_PM) {
-                        dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
-                        dasd_restore_device(device);
-                        ret = 1;
-                        break;
-                }
-                dasd_schedule_device_bh(device);
-                if (device->block)
-                        dasd_schedule_block_bh(device->block);
                 ret = 1;
+                if (device->path_data.opm)
+                        ret = dasd_generic_path_operational(device);
                 break;
         }
         dasd_put_device(device);
         return ret;
 }
 
+void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
+{
+        int chp;
+        __u8 oldopm, eventlpm;
+        struct dasd_device *device;
+
+        device = dasd_device_from_cdev_locked(cdev);
+        if (IS_ERR(device))
+                return;
+        for (chp = 0; chp < 8; chp++) {
+                eventlpm = 0x80 >> chp;
+                if (path_event[chp] & PE_PATH_GONE) {
+                        oldopm = device->path_data.opm;
+                        device->path_data.opm &= ~eventlpm;
+                        device->path_data.ppm &= ~eventlpm;
+                        device->path_data.npm &= ~eventlpm;
+                        if (oldopm && !device->path_data.opm)
+                                dasd_generic_last_path_gone(device);
+                }
+                if (path_event[chp] & PE_PATH_AVAILABLE) {
+                        device->path_data.opm &= ~eventlpm;
+                        device->path_data.ppm &= ~eventlpm;
+                        device->path_data.npm &= ~eventlpm;
+                        device->path_data.tbvpm |= eventlpm;
+                        dasd_schedule_device_bh(device);
+                }
+        }
+        dasd_put_device(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_event);
+
+int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
+{
+        if (!device->path_data.opm && lpm) {
+                device->path_data.opm = lpm;
+                dasd_generic_path_operational(device);
+        } else
+                device->path_data.opm |= lpm;
+        return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
+
 int dasd_generic_pm_freeze(struct ccw_device *cdev)
 {
         struct dasd_ccw_req *cqr, *n;
@@ -152,9 +152,9 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
         opm = ccw_device_get_path_mask(device->cdev);
         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-        //FIXME: start with get_opm ?
         if (erp->lpm == 0)
-                erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
+                erp->lpm = device->path_data.opm &
+                        ~(erp->irb.esw.esw0.sublog.lpum);
         else
                 erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
 
@@ -270,10 +270,11 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
 {
         erp->function = dasd_3990_erp_action_1;
         dasd_3990_erp_alternate_path(erp);
-        if (erp->status == DASD_CQR_FAILED) {
+        if (erp->status == DASD_CQR_FAILED &&
+            !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
                 erp->status = DASD_CQR_FILLED;
                 erp->retries = 10;
-                erp->lpm = LPM_ANYPATH;
+                erp->lpm = erp->startdev->path_data.opm;
                 erp->function = dasd_3990_erp_action_1_sec;
         }
         return erp;
@@ -1907,15 +1908,14 @@ dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
 static void
 dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
 {
         if (sense[25] & DASD_SENSE_BIT_3) {
                 dasd_3990_erp_alternate_path(erp);
 
-                if (erp->status == DASD_CQR_FAILED) {
+                if (erp->status == DASD_CQR_FAILED &&
+                    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
                         /* reset the lpm and the status to be able to
                          * try further actions. */
-
-                        erp->lpm = 0;
-
+                        erp->lpm = erp->startdev->path_data.opm;
                         erp->status = DASD_CQR_NEED_ERP;
                 }
         }
@@ -639,6 +639,7 @@ dasd_put_device_wake(struct dasd_device *device)
 {
         wake_up(&dasd_delete_wq);
 }
+EXPORT_SYMBOL_GPL(dasd_put_device_wake);
 
 /*
  * Return dasd_device structure associated with cdev.
@@ -619,6 +619,7 @@ static struct dasd_discipline dasd_diag_discipline = {
         .ebcname = "DIAG",
         .max_blocks = DIAG_MAX_BLOCKS,
         .check_device = dasd_diag_check_device,
+        .verify_path = dasd_generic_verify_path,
         .fill_geometry = dasd_diag_fill_geometry,
         .start_IO = dasd_start_diag,
         .term_IO = dasd_diag_term_IO,
[The diff for one file is collapsed and not shown here.]
@@ -45,6 +45,7 @@
 #define DASD_ECKD_CCW_PFX 0xE7
 #define DASD_ECKD_CCW_PFX_READ 0xEA
 #define DASD_ECKD_CCW_RSCK 0xF9
+#define DASD_ECKD_CCW_RCD 0xFA
 
 /*
  * Perform Subsystem Function / Sub-Orders
@@ -59,6 +60,7 @@
 
 #define FCX_MAX_DATA_FACTOR 65536
+#define DASD_ECKD_RCD_DATA_SIZE 256
 
 
 /*****************************************************************************
@@ -335,12 +337,6 @@ struct dasd_gneq {
         __u8 reserved2[22];
 } __attribute__ ((packed));
 
-struct dasd_eckd_path {
-        __u8 opm;
-        __u8 ppm;
-        __u8 npm;
-};
-
 struct dasd_rssd_features {
         char feature[256];
 } __attribute__((packed));
@@ -446,7 +442,6 @@ struct dasd_eckd_private {
         struct vd_sneq *vdsneq;
         struct dasd_gneq *gneq;
-        struct dasd_eckd_path path_data;
         struct eckd_count count_area[5];
         int init_cqr_status;
         int uses_cdl;
@@ -96,7 +96,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
                 DBF_DEV_EVENT(DBF_DEBUG, device,
                               "default ERP called (%i retries left)",
                               cqr->retries);
-                cqr->lpm = LPM_ANYPATH;
+                if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+                        cqr->lpm = device->path_data.opm;
                 cqr->status = DASD_CQR_FILLED;
         } else {
                 pr_err("%s: default ERP has run out of retries and failed\n",
@@ -73,6 +73,7 @@ static struct ccw_driver dasd_fba_driver = {
         .set_offline = dasd_generic_set_offline,
         .set_online = dasd_fba_set_online,
         .notify = dasd_generic_notify,
+        .path_event = dasd_generic_path_event,
         .freeze = dasd_generic_pm_freeze,
         .thaw = dasd_generic_restore_device,
         .restore = dasd_generic_restore_device,
@@ -164,6 +165,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
         }
 
         device->default_expires = DASD_EXPIRES;
+        device->path_data.opm = LPM_ANYPATH;
 
         readonly = dasd_device_is_ro(device);
         if (readonly)
@@ -596,6 +598,7 @@ static struct dasd_discipline dasd_fba_discipline = {
         .max_blocks = 96,
         .check_device = dasd_fba_check_characteristics,
         .do_analysis = dasd_fba_do_analysis,
+        .verify_path = dasd_generic_verify_path,
         .fill_geometry = dasd_fba_fill_geometry,
         .start_IO = dasd_start_IO,
         .term_IO = dasd_term_IO,
@@ -231,6 +231,7 @@ struct dasd_ccw_req {
 /* per dasd_ccw_req flags */
 #define DASD_CQR_FLAGS_USE_ERP   0     /* use ERP for this request */
 #define DASD_CQR_FLAGS_FAILFAST  1     /* FAILFAST */
+#define DASD_CQR_VERIFY_PATH     2     /* path verification request */
 
 /* Signature for error recovery functions. */
 typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
@@ -286,6 +287,14 @@ struct dasd_discipline {
          */
         int (*do_analysis) (struct dasd_block *);
 
+        /*
+         * This function is called when new paths become available.
+         * Disciplines may use this callback to do necessary setup work,
+         * e.g. verify that the new path is compatible with the current
+         * configuration.
+         */
+        int (*verify_path)(struct dasd_device *, __u8);
+
         /*
          * Last things to do when a device is set online, and first things
          * when it is set offline.
@@ -362,6 +371,13 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
 #define DASD_EER_STATECHANGE 3
 #define DASD_EER_PPRCSUSPEND 4
 
+struct dasd_path {
+        __u8 opm;
+        __u8 tbvpm;
+        __u8 ppm;
+        __u8 npm;
+};
+
 struct dasd_device {
         /* Block device stuff. */
         struct dasd_block *block;
@@ -377,6 +393,7 @@ struct dasd_device {
         struct dasd_discipline *discipline;
         struct dasd_discipline *base_discipline;
         char *private;
+        struct dasd_path path_data;
 
         /* Device state and target state. */
         int state, target;
@@ -620,10 +637,15 @@ void dasd_generic_remove (struct ccw_device *cdev);
 int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
 int dasd_generic_set_offline (struct ccw_device *cdev);
 int dasd_generic_notify(struct ccw_device *, int);
+int dasd_generic_last_path_gone(struct dasd_device *);
+int dasd_generic_path_operational(struct dasd_device *);
 void dasd_generic_handle_state_change(struct dasd_device *);
 int dasd_generic_pm_freeze(struct ccw_device *);
 int dasd_generic_restore_device(struct ccw_device *);
 enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
+void dasd_generic_path_event(struct ccw_device *, int *);
+int dasd_generic_verify_path(struct dasd_device *, __u8);
 int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
 char *dasd_get_sense(struct irb *);