Commit 982bdf81 authored by Sebastian Ott, committed by Martin Schwidefsky

[S390] ccwreq: add ability to use all paths

Change the ccwrequest infrastructure to use more than one channel
path per start I/O. A flag "singlepath" is added to struct
ccw_request - if set, the old behavior is used. This flag is set
for all exploiters of the ccwrequest infrastructure - so there
is no functional change through this patch.
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 7cd40314
...@@ -38,9 +38,13 @@ static u16 ccwreq_next_path(struct ccw_device *cdev) ...@@ -38,9 +38,13 @@ static u16 ccwreq_next_path(struct ccw_device *cdev)
{ {
struct ccw_request *req = &cdev->private->req; struct ccw_request *req = &cdev->private->req;
if (!req->singlepath) {
req->mask = 0;
goto out;
}
req->retries = req->maxretries; req->retries = req->maxretries;
req->mask = lpm_adjust(req->mask >>= 1, req->lpm); req->mask = lpm_adjust(req->mask >>= 1, req->lpm);
out:
return req->mask; return req->mask;
} }
...@@ -113,8 +117,12 @@ void ccw_request_start(struct ccw_device *cdev) ...@@ -113,8 +117,12 @@ void ccw_request_start(struct ccw_device *cdev)
{ {
struct ccw_request *req = &cdev->private->req; struct ccw_request *req = &cdev->private->req;
/* Try all paths twice to counter link flapping. */ if (req->singlepath) {
req->mask = 0x8080; /* Try all paths twice to counter link flapping. */
req->mask = 0x8080;
} else
req->mask = req->lpm;
req->retries = req->maxretries; req->retries = req->maxretries;
req->mask = lpm_adjust(req->mask, req->lpm); req->mask = lpm_adjust(req->mask, req->lpm);
req->drc = 0; req->drc = 0;
......
...@@ -216,6 +216,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev) ...@@ -216,6 +216,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
req->timeout = SENSE_ID_TIMEOUT; req->timeout = SENSE_ID_TIMEOUT;
req->maxretries = SENSE_ID_RETRIES; req->maxretries = SENSE_ID_RETRIES;
req->lpm = sch->schib.pmcw.pam & sch->opm; req->lpm = sch->schib.pmcw.pam & sch->opm;
req->singlepath = 1;
req->check = snsid_check; req->check = snsid_check;
req->callback = snsid_callback; req->callback = snsid_callback;
ccw_request_start(cdev); ccw_request_start(cdev);
......
...@@ -208,6 +208,7 @@ static void spid_start(struct ccw_device *cdev) ...@@ -208,6 +208,7 @@ static void spid_start(struct ccw_device *cdev)
req->timeout = PGID_TIMEOUT; req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES; req->maxretries = PGID_RETRIES;
req->lpm = 0x80; req->lpm = 0x80;
req->singlepath = 1;
req->callback = spid_callback; req->callback = spid_callback;
spid_do(cdev); spid_do(cdev);
} }
...@@ -420,6 +421,7 @@ static void verify_start(struct ccw_device *cdev) ...@@ -420,6 +421,7 @@ static void verify_start(struct ccw_device *cdev)
req->timeout = PGID_TIMEOUT; req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES; req->maxretries = PGID_RETRIES;
req->lpm = 0x80; req->lpm = 0x80;
req->singlepath = 1;
if (cdev->private->flags.pgroup) { if (cdev->private->flags.pgroup) {
CIO_TRACE_EVENT(4, "snid"); CIO_TRACE_EVENT(4, "snid");
CIO_HEX_EVENT(4, devid, sizeof(*devid)); CIO_HEX_EVENT(4, devid, sizeof(*devid));
...@@ -507,6 +509,7 @@ void ccw_device_disband_start(struct ccw_device *cdev) ...@@ -507,6 +509,7 @@ void ccw_device_disband_start(struct ccw_device *cdev)
req->timeout = PGID_TIMEOUT; req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES; req->maxretries = PGID_RETRIES;
req->lpm = sch->schib.pmcw.pam & sch->opm; req->lpm = sch->schib.pmcw.pam & sch->opm;
req->singlepath = 1;
req->callback = disband_callback; req->callback = disband_callback;
fn = SPID_FUNC_DISBAND; fn = SPID_FUNC_DISBAND;
if (cdev->private->flags.mpath) if (cdev->private->flags.mpath)
...@@ -560,6 +563,7 @@ void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1, ...@@ -560,6 +563,7 @@ void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
req->timeout = PGID_TIMEOUT; req->timeout = PGID_TIMEOUT;
req->maxretries = PGID_RETRIES; req->maxretries = PGID_RETRIES;
req->lpm = sch->schib.pmcw.pam & sch->opm; req->lpm = sch->schib.pmcw.pam & sch->opm;
req->singlepath = 1;
req->data = data; req->data = data;
req->callback = stlck_callback; req->callback = stlck_callback;
stlck_build_cp(cdev, buf1, buf2); stlck_build_cp(cdev, buf1, buf2);
......
...@@ -92,11 +92,12 @@ enum io_status { ...@@ -92,11 +92,12 @@ enum io_status {
* @filter: optional callback to adjust request status based on IRB data * @filter: optional callback to adjust request status based on IRB data
* @callback: final callback * @callback: final callback
* @data: user-defined pointer passed to all callbacks * @data: user-defined pointer passed to all callbacks
* @singlepath: if set, use only one path from @lpm per start I/O
* @cancel: non-zero if request was cancelled
* @done: non-zero if request was finished
* @mask: current path mask * @mask: current path mask
* @retries: current number of retries * @retries: current number of retries
* @drc: delayed return code * @drc: delayed return code
* @cancel: non-zero if request was cancelled
* @done: non-zero if request was finished
*/ */
struct ccw_request { struct ccw_request {
struct ccw1 *cp; struct ccw1 *cp;
...@@ -108,12 +109,13 @@ struct ccw_request { ...@@ -108,12 +109,13 @@ struct ccw_request {
enum io_status); enum io_status);
void (*callback)(struct ccw_device *, void *, int); void (*callback)(struct ccw_device *, void *, int);
void *data; void *data;
unsigned int singlepath:1;
/* These fields are used internally. */ /* These fields are used internally. */
unsigned int cancel:1;
unsigned int done:1;
u16 mask; u16 mask;
u16 retries; u16 retries;
int drc; int drc;
int cancel:1;
int done:1;
} __attribute__((packed)); } __attribute__((packed));
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment