Commit 4257aaec authored by Peter Oberparleiter, committed by Martin Schwidefsky

[S390] cio: remove intretry flag

After changing all internal I/O functions to use the newly introduced
ccw request infrastructure, retries are handled automatically after a
clear operation. Therefore remove the internal retry flag and
associated code.
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 350e9120
...@@ -1068,36 +1068,6 @@ static void io_subchannel_verify(struct subchannel *sch) ...@@ -1068,36 +1068,6 @@ static void io_subchannel_verify(struct subchannel *sch)
dev_fsm_event(cdev, DEV_EVENT_VERIFY); dev_fsm_event(cdev, DEV_EVENT_VERIFY);
} }
/*
 * check_for_io_on_path - report whether I/O is active on the given path.
 * @sch: subchannel to inspect
 * @mask: path mask to compare against the last-path-used mask
 *
 * Returns 1 if the subchannel has activity pending and its lpum matches
 * @mask, 0 otherwise (including when the schib cannot be refreshed).
 */
static int check_for_io_on_path(struct subchannel *sch, int mask)
{
	if (cio_update_schib(sch) != 0)
		return 0;
	if (scsw_actl(&sch->schib.scsw) == 0)
		return 0;
	return sch->schib.pmcw.lpum == mask;
}
static void terminate_internal_io(struct subchannel *sch,
struct ccw_device *cdev)
{
if (cio_clear(sch)) {
/* Recheck device in case clear failed. */
sch->lpm = 0;
if (cdev->online)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
else
css_schedule_eval(sch->schid);
return;
}
cdev->private->state = DEV_STATE_CLEAR_VERIFY;
/* Request retry of internal operation. */
cdev->private->flags.intretry = 1;
/* Call handler. */
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
}
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{ {
struct ccw_device *cdev; struct ccw_device *cdev;
...@@ -1105,18 +1075,24 @@ static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) ...@@ -1105,18 +1075,24 @@ static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
cdev = sch_get_cdev(sch); cdev = sch_get_cdev(sch);
if (!cdev) if (!cdev)
return; return;
if (check_for_io_on_path(sch, mask)) { if (cio_update_schib(sch))
if (cdev->private->state == DEV_STATE_ONLINE) goto err;
ccw_device_kill_io(cdev); /* Check for I/O on path. */
else { if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
terminate_internal_io(sch, cdev); goto out;
/* Re-start path verification. */ if (cdev->private->state == DEV_STATE_ONLINE) {
dev_fsm_event(cdev, DEV_EVENT_VERIFY); ccw_device_kill_io(cdev);
} goto out;
} else }
/* trigger path verification. */ if (cio_clear(sch))
dev_fsm_event(cdev, DEV_EVENT_VERIFY); goto err;
out:
/* Trigger path verification. */
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
return;
err:
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
} }
static int io_subchannel_chp_event(struct subchannel *sch, static int io_subchannel_chp_event(struct subchannel *sch,
......
...@@ -21,7 +21,6 @@ enum dev_state { ...@@ -21,7 +21,6 @@ enum dev_state {
DEV_STATE_DISBAND_PGID, DEV_STATE_DISBAND_PGID,
DEV_STATE_BOXED, DEV_STATE_BOXED,
/* states to wait for i/o completion before doing something */ /* states to wait for i/o completion before doing something */
DEV_STATE_CLEAR_VERIFY,
DEV_STATE_TIMEOUT_KILL, DEV_STATE_TIMEOUT_KILL,
DEV_STATE_QUIESCE, DEV_STATE_QUIESCE,
/* special states for devices gone not operational */ /* special states for devices gone not operational */
......
...@@ -771,12 +771,6 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) ...@@ -771,12 +771,6 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
*/ */
if (scsw_fctl(&irb->scsw) & if (scsw_fctl(&irb->scsw) &
(SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
/* Retry Basic Sense if requested. */
if (cdev->private->flags.intretry) {
cdev->private->flags.intretry = 0;
ccw_device_do_sense(cdev, irb);
return;
}
cdev->private->flags.dosense = 0; cdev->private->flags.dosense = 0;
memset(&cdev->private->irb, 0, sizeof(struct irb)); memset(&cdev->private->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb); ccw_device_accumulate_irb(cdev, irb);
...@@ -799,21 +793,6 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) ...@@ -799,21 +793,6 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
ccw_device_online_verify(cdev, 0); ccw_device_online_verify(cdev, 0);
} }
/*
 * ccw_device_clear_verify - handle the interrupt for a cio-initiated clear.
 * @cdev: device the interrupt arrived for
 * @dev_event: device event that triggered this handler (unused)
 *
 * Accumulates the status from the lowcore irb, resets the private irb to
 * avoid stale residuals, and restarts delayed device verification.
 */
static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *lcirb = (struct irb *) __LC_IRB;

	/* Fold the incoming status into the accumulated irb; no basic sense. */
	ccw_device_accumulate_irb(cdev, lcirb);
	/* Wipe the private irb so no residual data survives. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	/* Kick off the delayed device verification. */
	ccw_device_online_verify(cdev, 0);
	/* Deliberately no driver-handler call for a cio-initiated clear. */
}
static void static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{ {
...@@ -1069,12 +1048,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { ...@@ -1069,12 +1048,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_VERIFY] = ccw_device_nop, [DEV_EVENT_VERIFY] = ccw_device_nop,
}, },
/* states to wait for i/o completion before doing something */ /* states to wait for i/o completion before doing something */
[DEV_STATE_CLEAR_VERIFY] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_TIMEOUT_KILL] = { [DEV_STATE_TIMEOUT_KILL] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_killing_irq, [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
......
...@@ -167,8 +167,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, ...@@ -167,8 +167,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
return -EINVAL; return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER) if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV; return -ENODEV;
if (cdev->private->state == DEV_STATE_VERIFY || if (cdev->private->state == DEV_STATE_VERIFY) {
cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
/* Remember to fake irb when finished. */ /* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) { if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = 1; cdev->private->flags.fake_irb = 1;
......
...@@ -336,9 +336,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) ...@@ -336,9 +336,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
sense_ccw->count = SENSE_MAX_COUNT; sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI; sense_ccw->flags = CCW_FLAG_SLI;
/* Reset internal retry indication. */
cdev->private->flags.intretry = 0;
rc = cio_start(sch, sense_ccw, 0xff); rc = cio_start(sch, sense_ccw, 0xff);
if (rc == -ENODEV || rc == -EACCES) if (rc == -ENODEV || rc == -EACCES)
dev_fsm_event(cdev, DEV_EVENT_VERIFY); dev_fsm_event(cdev, DEV_EVENT_VERIFY);
......
...@@ -165,7 +165,6 @@ struct ccw_device_private { ...@@ -165,7 +165,6 @@ struct ccw_device_private {
unsigned int donotify:1; /* call notify function */ unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */ unsigned int recog_done:1; /* dev. recog. complete */
unsigned int fake_irb:1; /* deliver faked irb */ unsigned int fake_irb:1; /* deliver faked irb */
unsigned int intretry:1; /* retry internal operation */
unsigned int resuming:1; /* recognition while resume */ unsigned int resuming:1; /* recognition while resume */
unsigned int pgid_rdy:1; /* pgids are ready */ unsigned int pgid_rdy:1; /* pgids are ready */
} __attribute__((packed)) flags; } __attribute__((packed)) flags;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment