Commit fefe4ef5 authored by Martin Schwidefsky, committed by Linus Torvalds

[PATCH] s390: common i/o layer update.

Common i/o layer fixes:
 - Fix for "path not operational" condition in cio_start.
 - Fix handling of user interruption parameter.
 - Add code to wait for devices in init_ccw_bus_type.
 - Move qdio states out of main cio state machine.
 - Reworked chsc data structures.
 - Add ccw_device_start_timeout (a usage sketch follows below).
 - Handle path verification required flag.
parent 71e25a79
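The hunks below are easier to follow with a small usage sketch. This is a hypothetical driver fragment, not part of the patch: the sample_* names, the intparm value, the path mask and the ten-second expiry are invented for illustration, and it assumes the new ccw_device_start_timeout() entry point is made visible to drivers through <asm/ccwdev.h>.

    #include <linux/kernel.h>
    #include <linux/param.h>
    #include <linux/errno.h>
    #include <linux/err.h>
    #include <asm/ccwdev.h>
    #include <asm/cio.h>

    static void
    sample_irq_handler(struct ccw_device *cdev, unsigned long intparm,
                       struct irb *irb)
    {
        /* With this update the common I/O layer may pass an ERR_PTR-encoded
         * irb: ERR_PTR(-ETIMEDOUT) when a request started with
         * ccw_device_start_timeout() expires, or ERR_PTR(-EIO) when cio had
         * to terminate the request because a channel path became unusable. */
        if (IS_ERR(irb)) {
            printk(KERN_WARNING "sample: request %lx failed (%ld)\n",
                   intparm, PTR_ERR(irb));
            return;
        }
        /* Normal interrupt: evaluate the irb as before. */
    }

    static int
    sample_start_io(struct ccw_device *cdev, struct ccw1 *cpa)
    {
        /* The handler would normally be installed at driver probe time. */
        cdev->handler = sample_irq_handler;
        /* Start the channel program; cio cancels it if no interrupt
         * arrives within ten seconds. */
        return ccw_device_start_timeout(cdev, cpa, 0x1234 /* intparm */,
                                        0xff /* lpm */, 0 /* flags */,
                                        10 * HZ /* expires */);
    }

Since the interruption parameter is now stored per device (cdev->private->intparm) and handed back to the driver's handler, cio_halt() and cio_clear() no longer take an intparm argument of their own.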
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
* drivers/s390/cio/airq.c * drivers/s390/cio/airq.c
* S/390 common I/O routines -- support for adapter interruptions * S/390 common I/O routines -- support for adapter interruptions
* *
* $Revision: 1.10 $ * $Revision: 1.11 $
* *
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation * IBM Corporation
...@@ -87,14 +87,14 @@ s390_unregister_adapter_interrupt (adapter_int_handler_t handler) ...@@ -87,14 +87,14 @@ s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
} }
void void
do_adapter_IO (__u32 intparm) do_adapter_IO (void)
{ {
CIO_TRACE_EVENT (4, "doaio"); CIO_TRACE_EVENT (4, "doaio");
spin_lock (&adapter_lock); spin_lock (&adapter_lock);
if (adapter_handler) if (adapter_handler)
(*adapter_handler) (intparm); (*adapter_handler) ();
spin_unlock (&adapter_lock); spin_unlock (&adapter_lock);
......
#ifndef S390_AINTERRUPT_H #ifndef S390_AINTERRUPT_H
#define S390_AINTERRUPT_H #define S390_AINTERRUPT_H
typedef int (*adapter_int_handler_t)(__u32 intparm); typedef int (*adapter_int_handler_t)(void);
extern int s390_register_adapter_interrupt(adapter_int_handler_t handler); extern int s390_register_adapter_interrupt(adapter_int_handler_t handler);
extern int s390_unregister_adapter_interrupt(adapter_int_handler_t handler); extern int s390_unregister_adapter_interrupt(adapter_int_handler_t handler);
extern void do_adapter_IO (__u32 intparm); extern void do_adapter_IO (void);
#endif #endif
/* /*
* drivers/s390/cio/chsc.c * drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call * S/390 common I/O routines -- channel subsystem call
* $Revision: 1.57 $ * $Revision: 1.67 $
* *
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation * IBM Corporation
@@ -76,53 +76,77 @@ static int
 chsc_get_sch_desc_irq(int irq)
 {
     int ccode, chpid, j;
+    int ret;
-    /* FIXME: chsc_area_sei cannot be on the stack since it needs to
-     * be page-aligned. Implement proper locking or dynamic
-     * allocation or prove that this function does not have to be
-     * reentrant! */
-    static struct ssd_area chsc_area_ssd
-        __attribute__ ((aligned(PAGE_SIZE)));
-    typeof (chsc_area_ssd.response_block)
-        *ssd_res = &chsc_area_ssd.response_block;
-    chsc_area_ssd = (struct ssd_area) {
-        .request_block = {
-            .command_code1 = 0x0010,
-            .command_code2 = 0x0004,
-            .f_sch = irq,
-            .l_sch = irq,
-        }
+    struct {
+        struct chsc_header request;
+        u16 reserved1;
+        u16 f_sch;        /* first subchannel */
+        u16 reserved2;
+        u16 l_sch;        /* last subchannel */
+        u32 reserved3;
+        struct chsc_header response;
+        u32 reserved4;
+        u8 sch_valid : 1;
+        u8 dev_valid : 1;
+        u8 st        : 3; /* subchannel type */
+        u8 zeroes    : 3;
+        u8 unit_addr;     /* unit address */
+        u16 devno;        /* device number */
+        u8 path_mask;
+        u8 fla_valid_mask;
+        u16 sch;          /* subchannel */
+        u8 chpid[8];      /* chpids 0-7 */
+        u16 fla[8];       /* full link addresses 0-7 */
+    } *ssd_area;
+
+    ssd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+    if (!ssd_area) {
+        CIO_CRW_EVENT(0, "No memory for ssd area!\n");
+        return -ENOMEM;
+    }
+
+    ssd_area->request = (struct chsc_header) {
+        .length = 0x0010,
+        .code = 0x0004,
     };
+    ssd_area->f_sch = irq;
+    ssd_area->l_sch = irq;

-    ccode = chsc(&chsc_area_ssd);
+    ccode = chsc(ssd_area);
     if (ccode > 0) {
         pr_debug("chsc returned with ccode = %d\n", ccode);
-        if (ccode == 3)
-            return -ENODEV;
-        return -EBUSY;
+        ret = (ccode == 3) ? -ENODEV : -EBUSY;
+        goto out;
     }

-    switch (chsc_area_ssd.response_block.response_code) {
+    switch (ssd_area->response.code) {
     case 0x0001: /* everything ok */
+        ret = 0;
         break;
     case 0x0002:
         CIO_CRW_EVENT(2, "Invalid command!\n");
     case 0x0003:
         CIO_CRW_EVENT(2, "Error in chsc request block!\n");
-        return -EINVAL;
+        ret = -EINVAL;
+        break;
     case 0x0004:
         CIO_CRW_EVENT(2, "Model does not provide ssd\n");
-        return -EOPNOTSUPP;
+        ret = -EOPNOTSUPP;
+        break;
     default:
         CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
-                      chsc_area_ssd.response_block.response_code);
-        return -EIO;
+                      ssd_area->response.code);
+        ret = -EIO;
+        break;
     }
+    if (ret != 0)
+        goto out;

     /*
-     * ssd_res->st stores the type of the detected
+     * ssd_area->st stores the type of the detected
      * subchannel, with the following definitions:
      *
      * 0: I/O subchannel: All fields have meaning
@@ -135,43 +159,45 @@ chsc_get_sch_desc_irq(int irq)
      *
      * Other types are currently undefined.
      */
-    if (ssd_res->st > 3) { /* uhm, that looks strange... */
+    if (ssd_area->st > 3) { /* uhm, that looks strange... */
         CIO_CRW_EVENT(0, "Strange subchannel type %d"
-                      " for sch %x\n", ssd_res->st, irq);
+                      " for sch %x\n", ssd_area->st, irq);
         /*
          * There may have been a new subchannel type defined in the
          * time since this code was written; since we don't know which
          * fields have meaning and what to do with it we just jump out
          */
-        return 0;
+        goto out;
     } else {
-        const char type[4][8] = {"I/O", "chsc", "message", "ADM"};
+        const char *type[4] = {"I/O", "chsc", "message", "ADM"};
         CIO_CRW_EVENT(6, "ssd: sch %x is %s subchannel\n",
-                      irq, type[ssd_res->st]);
+                      irq, type[ssd_area->st]);
         if (ioinfo[irq] == NULL)
             /* FIXME: we should do device rec. here... */
-            return 0;
+            goto out;
         ioinfo[irq]->ssd_info.valid = 1;
-        ioinfo[irq]->ssd_info.type = ssd_res->st;
+        ioinfo[irq]->ssd_info.type = ssd_area->st;
     }
-    if (ssd_res->st == 0 || ssd_res->st == 2) {
+    if (ssd_area->st == 0 || ssd_area->st == 2) {
         for (j = 0; j < 8; j++) {
-            if (!((0x80 >> j) & ssd_res->path_mask &
-                  ssd_res->fla_valid_mask))
+            if (!((0x80 >> j) & ssd_area->path_mask &
+                  ssd_area->fla_valid_mask))
                 continue;
-            chpid = ssd_res->chpid[j];
+            chpid = ssd_area->chpid[j];
             if (chpid
                 && (!test_and_set_bit (chpid, chpids_known))
                 && (test_bit (chpid, chpids_logical)))
                 set_bit (chpid, chpids);
             ioinfo[irq]->ssd_info.chpid[j] = chpid;
-            ioinfo[irq]->ssd_info.fla[j] = ssd_res->fla[j];
+            ioinfo[irq]->ssd_info.fla[j] = ssd_area->fla[j];
         }
     }
-    return 0;
+out:
+    free_page ((unsigned long) ssd_area);
+    return ret;
 }
static int static int
...@@ -216,6 +242,7 @@ static inline void ...@@ -216,6 +242,7 @@ static inline void
s390_subchannel_remove_chpid(struct subchannel *sch, __u8 chpid) s390_subchannel_remove_chpid(struct subchannel *sch, __u8 chpid)
{ {
int j; int j;
int mask;
for (j = 0; j < 8; j++) for (j = 0; j < 8; j++)
if (sch->schib.pmcw.chpid[j] == chpid) if (sch->schib.pmcw.chpid[j] == chpid)
...@@ -223,16 +250,68 @@ s390_subchannel_remove_chpid(struct subchannel *sch, __u8 chpid) ...@@ -223,16 +250,68 @@ s390_subchannel_remove_chpid(struct subchannel *sch, __u8 chpid)
if (j >= 8) if (j >= 8)
return; return;
mask = 0x80 >> j;
spin_lock(&sch->lock); spin_lock(&sch->lock);
chsc_validate_chpids(sch); chsc_validate_chpids(sch);
/* just to be sure... */ stsch(sch->irq, &sch->schib);
sch->lpm &= ~(0x80>>j); if (sch->vpm == mask) {
dev_fsm_event(sch->dev.driver_data, DEV_EVENT_NOTOPER);
goto out_unlock;
}
if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
SCSW_ACTL_HALT_PEND |
SCSW_ACTL_START_PEND |
SCSW_ACTL_RESUME_PEND)) &&
(sch->schib.pmcw.lpum == mask)) {
int cc = cio_cancel(sch);
if (cc == -ENODEV) {
dev_fsm_event(sch->dev.driver_data, DEV_EVENT_NOTOPER);
goto out_unlock;
}
if (cc == -EINVAL) {
struct ccw_device *cdev;
cc = cio_clear(sch);
if (cc == -ENODEV) {
dev_fsm_event(sch->dev.driver_data,
DEV_EVENT_NOTOPER);
goto out_unlock;
}
/* Call handler. */
cdev = sch->dev.driver_data;
cdev->private->state = DEV_STATE_CLEAR_VERIFY;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
goto out_unlock;
}
} else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
(sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
(sch->schib.pmcw.lpum == mask)) {
struct ccw_device *cdev;
int cc;
cc = cio_clear(sch);
if (cc == -ENODEV) {
dev_fsm_event(sch->dev.driver_data, DEV_EVENT_NOTOPER);
goto out_unlock;
}
/* Call handler. */
cdev = sch->dev.driver_data;
cdev->private->state = DEV_STATE_CLEAR_VERIFY;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
goto out_unlock;
}
/* trigger path verification. */ /* trigger path verification. */
dev_fsm_event(sch->dev.driver_data, DEV_EVENT_VERIFY); dev_fsm_event(sch->dev.driver_data, DEV_EVENT_VERIFY);
out_unlock:
spin_unlock(&sch->lock); spin_unlock(&sch->lock);
} }
...@@ -265,7 +344,7 @@ s390_set_chpid_offline( __u8 chpid) ...@@ -265,7 +344,7 @@ s390_set_chpid_offline( __u8 chpid)
sch = ioinfo[irq]; sch = ioinfo[irq];
if (sch == NULL) if (sch == NULL)
continue; /* we don't know the device anyway */ continue; /* we don't know the device anyway */
/* FIXME: Kill pending I/O. */
s390_subchannel_remove_chpid(sch, chpid); s390_subchannel_remove_chpid(sch, chpid);
} }
#endif #endif
...@@ -349,7 +428,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) ...@@ -349,7 +428,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
if (!test_bit(chpid, chpids_logical)) if (!test_bit(chpid, chpids_logical))
return; /* no need to do the rest */ return; /* no need to do the rest */
for (irq = 0; irq <= __MAX_SUBCHANNELS; irq++) { for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
int chp_mask; int chp_mask;
sch = ioinfo[irq]; sch = ioinfo[irq];
...@@ -369,7 +448,6 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) ...@@ -369,7 +448,6 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
continue; continue;
} }
/* FIXME: Kill pending I/O. */
spin_lock_irq(&sch->lock); spin_lock_irq(&sch->lock);
chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch); chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
@@ -402,92 +480,97 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
 static void
 do_process_crw(void *ignore)
 {
-    int do_sei;
+    struct {
+        struct chsc_header request;
+        u32 reserved1;
+        u32 reserved2;
+        u32 reserved3;
+        struct chsc_header response;
+        u32 reserved4;
+        u8 flags;
+        u8 vf;        /* validity flags */
+        u8 rs;        /* reporting source */
+        u8 cc;        /* content code */
+        u16 fla;      /* full link address */
+        u16 rsid;     /* reporting source id */
+        u32 reserved5;
+        u32 reserved6;
+        u32 ccdf;     /* content-code dependent field */
+        u32 reserved7;
+        u32 reserved8;
+        u32 reserved9;
+    } *sei_area;

     /*
      * build the chsc request block for store event information
      * and do the call
      */
-    /* FIXME: chsc_area_sei cannot be on the stack since it needs to
-     * be page-aligned. Implement proper locking or dynamic
-     * allocation or prove that this function does not have to be
-     * reentrant! */
-    static struct sei_area chsc_area_sei
-        __attribute__ ((aligned(PAGE_SIZE))) = {
-            .request_block = {
-                .command_code1 = 0x0010,
-                .command_code2 = 0x000e
-            }
-        };
-    typeof (chsc_area_sei.response_block)
-        *sei_res = &chsc_area_sei.response_block;
+    sei_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+    if (!sei_area) {
+        CIO_CRW_EVENT(0, "No memory for sei area!\n");
+        return;
+    }

     CIO_TRACE_EVENT( 2, "prcss");

-    do_sei = 1;
-    while (do_sei) {
+    do {
         int ccode;

+        memset(sei_area, 0, sizeof(*sei_area));
+        sei_area->request = (struct chsc_header) {
+            .length = 0x0010,
+            .code = 0x000e,
+        };

-        ccode = chsc(&chsc_area_sei);
+        ccode = chsc(sei_area);
         if (ccode > 0)
-            return;
+            goto out;

-        switch (sei_res->response_code) {
+        switch (sei_area->response.code) {
         /* for debug purposes, check for problems */
         case 0x0001:
-            CIO_CRW_EVENT(4, "chsc_process_crw: event information "
-                          "successfully stored\n");
             break; /* everything ok */
         case 0x0002:
             CIO_CRW_EVENT(2,
                           "chsc_process_crw: invalid command!\n");
-            return;
+            goto out;
         case 0x0003:
             CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
                           "request block!\n");
-            return;
+            goto out;
         case 0x0005:
             CIO_CRW_EVENT(2, "chsc_process_crw: no event "
                           "information stored\n");
-            return;
+            goto out;
         default:
             CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
-                          sei_res->response_code);
-            return;
+                          sei_area->response.code);
+            goto out;
         }
+        CIO_CRW_EVENT(4, "chsc_process_crw: event information "
+                      "successfully stored\n");

-        /* Check if there is more event information pending. */
-        if (sei_res->flags & 0x80)
-            CIO_CRW_EVENT( 2, "chsc_process_crw: "
-                           "further event information pending\n");
-        else
-            do_sei = 0;

         /* Check if we might have lost some information. */
-        if (sei_res->flags & 0x40)
-            CIO_CRW_EVENT( 2, "chsc_process_crw: Event information "
+        if (sei_area->flags & 0x40)
+            CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
                           "has been lost due to overflow!\n");

-        if (sei_res->rs != 4) {
+        if (sei_area->rs != 4) {
             CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
                           "(%04X) isn't a chpid!\n",
-                          sei_res->rsid);
+                          sei_area->rsid);
             continue;
         }

         /* which kind of information was stored? */
-        switch (sei_res->cc) {
+        switch (sei_area->cc) {
         case 1: /* link incident*/
             CIO_CRW_EVENT(4, "chsc_process_crw: "
                           "channel subsystem reports link incident,"
-                          " source is chpid %x\n", sei_res->rsid);
-            s390_set_chpid_offline(sei_res->rsid);
+                          " source is chpid %x\n", sei_area->rsid);
+            s390_set_chpid_offline(sei_area->rsid);
             break;
         case 2: /* i/o resource accessibiliy */
@@ -495,27 +578,27 @@ do_process_crw(void *ignore)
                      "channel subsystem reports some I/O "
                      "devices may have become accessible\n");
             pr_debug("Data received after sei: \n");
-            pr_debug("Validity flags: %x\n", sei_res->vf);
+            pr_debug("Validity flags: %x\n", sei_area->vf);

             /* allocate a new channel path structure, if needed */
-            if (chps[sei_res->rsid] == NULL)
-                new_channel_path(sei_res->rsid, CHP_ONLINE);
+            if (chps[sei_area->rsid] == NULL)
+                new_channel_path(sei_area->rsid, CHP_ONLINE);
             else
-                set_chp_status(sei_res->rsid, CHP_ONLINE);
+                set_chp_status(sei_area->rsid, CHP_ONLINE);

-            if ((sei_res->vf & 0x80) == 0) {
-                pr_debug("chpid: %x\n", sei_res->rsid);
-                s390_process_res_acc(sei_res->rsid, 0, 0);
-            } else if ((sei_res->vf & 0xc0) == 0x80) {
+            if ((sei_area->vf & 0x80) == 0) {
+                pr_debug("chpid: %x\n", sei_area->rsid);
+                s390_process_res_acc(sei_area->rsid, 0, 0);
+            } else if ((sei_area->vf & 0xc0) == 0x80) {
                 pr_debug("chpid: %x link addr: %x\n",
-                         sei_res->rsid, sei_res->fla);
-                s390_process_res_acc(sei_res->rsid,
-                                     sei_res->fla, 0xff00);
-            } else if ((sei_res->vf & 0xc0) == 0xc0) {
+                         sei_area->rsid, sei_area->fla);
+                s390_process_res_acc(sei_area->rsid,
+                                     sei_area->fla, 0xff00);
+            } else if ((sei_area->vf & 0xc0) == 0xc0) {
                 pr_debug("chpid: %x full link addr: %x\n",
-                         sei_res->rsid, sei_res->fla);
-                s390_process_res_acc(sei_res->rsid,
-                                     sei_res->fla, 0xffff);
+                         sei_area->rsid, sei_area->fla);
+                s390_process_res_acc(sei_area->rsid,
+                                     sei_area->fla, 0xffff);
             }
             pr_debug("\n");
@@ -523,15 +606,13 @@ do_process_crw(void *ignore)
         default: /* other stuff */
             CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
-                          sei_res->cc);
+                          sei_area->cc);
             break;
         }
-        if (do_sei) {
-            memset(&chsc_area_sei, 0, sizeof(struct sei_area));
-            chsc_area_sei.request_block.command_code1 = 0x0010;
-            chsc_area_sei.request_block.command_code2 = 0x000e;
-        }
-    }
+    } while (sei_area->flags & 0x80);
+out:
+    free_page((unsigned long)sei_area);
 }
void void
...@@ -539,7 +620,7 @@ chsc_process_crw(void) ...@@ -539,7 +620,7 @@ chsc_process_crw(void)
{ {
static DECLARE_WORK(work, do_process_crw, 0); static DECLARE_WORK(work, do_process_crw, 0);
schedule_work(&work); queue_work(ccw_device_work, &work);
} }
static void static void
...@@ -555,7 +636,7 @@ chp_add(int chpid) ...@@ -555,7 +636,7 @@ chp_add(int chpid)
sprintf(dbf_txt, "cadd%x", chpid); sprintf(dbf_txt, "cadd%x", chpid);
CIO_TRACE_EVENT(2, dbf_txt); CIO_TRACE_EVENT(2, dbf_txt);
for (irq = 0; irq <= __MAX_SUBCHANNELS; irq++) { for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
int i; int i;
sch = ioinfo[irq]; sch = ioinfo[irq];
...@@ -567,7 +648,6 @@ chp_add(int chpid) ...@@ -567,7 +648,6 @@ chp_add(int chpid)
continue; continue;
} }
/* FIXME: Kill pending I/O. */
spin_lock(&sch->lock); spin_lock(&sch->lock);
for (i=0; i<8; i++) for (i=0; i<8; i++)
if (sch->schib.pmcw.chpid[i] == chpid) { if (sch->schib.pmcw.chpid[i] == chpid) {
...@@ -599,26 +679,9 @@ chp_add(int chpid) ...@@ -599,26 +679,9 @@ chp_add(int chpid)
* Handling of crw machine checks with channel path source. * Handling of crw machine checks with channel path source.
*/ */
void void
chp_process_crw(int chpid) chp_process_crw(int chpid, int on)
{ {
/* if (on == 0) {
* Update our descriptions. We need this since we don't always
* get machine checks for path come and can't rely on our information
* being consistent otherwise.
*/
chsc_get_sch_descriptions();
if (!cio_chsc_desc_avail) {
/*
* Something went wrong...
* We can't reliably say whether a path was there before.
*/
CIO_CRW_EVENT(0, "Error: Could not retrieve "
"subchannel descriptions, will not process chp"
"machine check...\n");
return;
}
if (!test_bit(chpid, chpids)) {
/* Path has gone. We use the link incident routine.*/ /* Path has gone. We use the link incident routine.*/
s390_set_chpid_offline(chpid); s390_set_chpid_offline(chpid);
} else { } else {
...@@ -646,9 +709,6 @@ s390_vary_chpid( __u8 chpid, int on) ...@@ -646,9 +709,6 @@ s390_vary_chpid( __u8 chpid, int on)
struct subchannel *sch; struct subchannel *sch;
int irq; int irq;
if (chpid <=0 || chpid >= NR_CHPIDS)
return -EINVAL;
sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
CIO_TRACE_EVENT( 2, dbf_text); CIO_TRACE_EVENT( 2, dbf_text);
......
...@@ -12,85 +12,10 @@ ...@@ -12,85 +12,10 @@
#define CHSC_SEI_ACC_LINKADDR 2 #define CHSC_SEI_ACC_LINKADDR 2
#define CHSC_SEI_ACC_FULLLINKADDR 3 #define CHSC_SEI_ACC_FULLLINKADDR 3
struct sei_area { struct chsc_header {
struct { u16 length;
/* word 0 */ u16 code;
__u16 command_code1; };
__u16 command_code2;
/* word 1 */
__u32 reserved1;
/* word 2 */
__u32 reserved2;
/* word 3 */
__u32 reserved3;
} __attribute__ ((packed,aligned(8))) request_block;
struct {
/* word 0 */
__u16 length;
__u16 response_code;
/* word 1 */
__u32 reserved1;
/* word 2 */
__u8 flags;
__u8 vf; /* validity flags */
__u8 rs; /* reporting source */
__u8 cc; /* content code */
/* word 3 */
__u16 fla; /* full link address */
__u16 rsid; /* reporting source id */
/* word 4 */
__u32 reserved2;
/* word 5 */
__u32 reserved3;
/* word 6 */
__u32 ccdf; /* content-code dependent field */
/* word 7 */
__u32 reserved4;
/* word 8 */
__u32 reserved5;
/* word 9 */
__u32 reserved6;
} __attribute__ ((packed,aligned(8))) response_block;
} __attribute__ ((packed,aligned(PAGE_SIZE)));
struct ssd_area {
struct {
/* word 0 */
__u16 command_code1;
__u16 command_code2;
/* word 1 */
__u16 reserved1;
__u16 f_sch; /* first subchannel */
/* word 2 */
__u16 reserved2;
__u16 l_sch; /* last subchannel */
/* word 3 */
__u32 reserved3;
} __attribute__ ((packed,aligned(8))) request_block;
struct {
/* word 0 */
__u16 length;
__u16 response_code;
/* word 1 */
__u32 reserved1;
/* word 2 */
__u8 sch_valid : 1;
__u8 dev_valid : 1;
__u8 st : 3; /* subchannel type */
__u8 zeroes : 3;
__u8 unit_addr; /* unit address */
__u16 devno; /* device number */
/* word 3 */
__u8 path_mask;
__u8 fla_valid_mask;
__u16 sch; /* subchannel */
/* words 4-5 */
__u8 chpid[8]; /* chpids 0-7 */
/* words 6-9 */
__u16 fla[8]; /* full link addresses 0-7 */
} __attribute__ ((packed,aligned(8))) response_block;
} __attribute__ ((packed,aligned(PAGE_SIZE)));
struct channel_path { struct channel_path {
int id; int id;
......
/* /*
* drivers/s390/cio/cio.c * drivers/s390/cio/cio.c
* S/390 common I/O routines -- low level i/o calls * S/390 common I/O routines -- low level i/o calls
* $Revision: 1.91 $ * $Revision: 1.97 $
* *
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation * IBM Corporation
...@@ -176,13 +176,13 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) ...@@ -176,13 +176,13 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
CIO_TRACE_EVENT(0, dbf_text); CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
return -ENODEV; return (sch->lpm ? -EACCES : -ENODEV);
} }
int int
cio_start (struct subchannel *sch, /* subchannel structure */ cio_start (struct subchannel *sch, /* subchannel structure */
struct ccw1 * cpa, /* logical channel prog addr */ struct ccw1 * cpa, /* logical channel prog addr */
unsigned long intparm, /* interruption parameter */ unsigned int intparm, /* interruption parameter */
__u8 lpm) /* logical path mask */ __u8 lpm) /* logical path mask */
{ {
char dbf_txt[15]; char dbf_txt[15];
...@@ -191,7 +191,7 @@ cio_start (struct subchannel *sch, /* subchannel structure */ ...@@ -191,7 +191,7 @@ cio_start (struct subchannel *sch, /* subchannel structure */
sprintf (dbf_txt, "stIO%x", sch->irq); sprintf (dbf_txt, "stIO%x", sch->irq);
CIO_TRACE_EVENT (4, dbf_txt); CIO_TRACE_EVENT (4, dbf_txt);
sch->orb.intparm = (__u32) (long) &sch->u_intparm; sch->orb.intparm = intparm;
sch->orb.fmt = 1; sch->orb.fmt = 1;
sch->orb.pfch = sch->options.prefetch == 0; sch->orb.pfch = sch->options.prefetch == 0;
...@@ -219,7 +219,6 @@ cio_start (struct subchannel *sch, /* subchannel structure */ ...@@ -219,7 +219,6 @@ cio_start (struct subchannel *sch, /* subchannel structure */
/* /*
* initialize device status information * initialize device status information
*/ */
sch->u_intparm = intparm;
sch->schib.scsw.actl |= SCSW_ACTL_START_PEND; sch->schib.scsw.actl |= SCSW_ACTL_START_PEND;
return 0; return 0;
case 1: /* status pending */ case 1: /* status pending */
...@@ -265,13 +264,10 @@ cio_resume (struct subchannel *sch) ...@@ -265,13 +264,10 @@ cio_resume (struct subchannel *sch)
} }
 /*
- * Note: The "intparm" parameter is not used by the halt_IO() function
- * itself, as no ORB is built for the HSCH instruction. However,
- * it allows the device interrupt handler to associate the upcoming
- * interrupt with the halt_IO() request.
+ * halt I/O operation
  */
 int
-cio_halt(struct subchannel *sch, unsigned long intparm)
+cio_halt(struct subchannel *sch)
{ {
char dbf_txt[15]; char dbf_txt[15];
int ccode; int ccode;
...@@ -297,7 +293,6 @@ cio_halt(struct subchannel *sch, unsigned long intparm) ...@@ -297,7 +293,6 @@ cio_halt(struct subchannel *sch, unsigned long intparm)
switch (ccode) { switch (ccode) {
case 0: case 0:
sch->u_intparm = intparm;
sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND; sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND;
return 0; return 0;
case 1: /* status pending */ case 1: /* status pending */
...@@ -309,13 +304,10 @@ cio_halt(struct subchannel *sch, unsigned long intparm) ...@@ -309,13 +304,10 @@ cio_halt(struct subchannel *sch, unsigned long intparm)
} }
 /*
- * Note: The "intparm" parameter is not used by the clear_IO() function
- * itself, as no ORB is built for the CSCH instruction. However,
- * it allows the device interrupt handler to associate the upcoming
- * interrupt with the clear_IO() request.
+ * Clear I/O operation
  */
 int
-cio_clear(struct subchannel *sch, unsigned long intparm)
+cio_clear(struct subchannel *sch)
{ {
char dbf_txt[15]; char dbf_txt[15];
int ccode; int ccode;
...@@ -340,7 +332,6 @@ cio_clear(struct subchannel *sch, unsigned long intparm) ...@@ -340,7 +332,6 @@ cio_clear(struct subchannel *sch, unsigned long intparm)
switch (ccode) { switch (ccode) {
case 0: case 0:
sch->u_intparm = intparm;
sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND; sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND;
return 0; return 0;
default: /* device not operational */ default: /* device not operational */
...@@ -374,6 +365,8 @@ cio_cancel (struct subchannel *sch) ...@@ -374,6 +365,8 @@ cio_cancel (struct subchannel *sch)
switch (ccode) { switch (ccode) {
case 0: /* success */ case 0: /* success */
/* Update information in scsw. */
stsch (sch->irq, &sch->schib);
return 0; return 0;
case 1: /* status pending */ case 1: /* status pending */
return -EBUSY; return -EBUSY;
...@@ -620,7 +613,7 @@ do_IRQ (struct pt_regs regs) ...@@ -620,7 +613,7 @@ do_IRQ (struct pt_regs regs)
*/ */
if (tpi_info->adapter_IO == 1 && if (tpi_info->adapter_IO == 1 &&
tpi_info->int_type == IO_INTERRUPT_TYPE) { tpi_info->int_type == IO_INTERRUPT_TYPE) {
do_adapter_IO (tpi_info->intparm); do_adapter_IO();
continue; continue;
} }
sch = ioinfo[tpi_info->irq]; sch = ioinfo[tpi_info->irq];
......
...@@ -98,8 +98,6 @@ struct subchannel { ...@@ -98,8 +98,6 @@ struct subchannel {
__u8 vpm; /* verified path mask */ __u8 vpm; /* verified path mask */
__u8 lpm; /* logical path mask */ __u8 lpm; /* logical path mask */
// TODO: intparm for second start i/o
unsigned long u_intparm; /* user interruption parameter */
struct schib schib; /* subchannel information block */ struct schib schib; /* subchannel information block */
struct orb orb; /* operation request block */ struct orb orb; /* operation request block */
struct ccw1 sense_ccw; /* static ccw for sense command */ struct ccw1 sense_ccw; /* static ccw for sense command */
...@@ -116,11 +114,10 @@ extern int cio_validate_subchannel (struct subchannel *, unsigned int); ...@@ -116,11 +114,10 @@ extern int cio_validate_subchannel (struct subchannel *, unsigned int);
extern int cio_enable_subchannel (struct subchannel *, unsigned int); extern int cio_enable_subchannel (struct subchannel *, unsigned int);
extern int cio_disable_subchannel (struct subchannel *); extern int cio_disable_subchannel (struct subchannel *);
extern int cio_cancel (struct subchannel *); extern int cio_cancel (struct subchannel *);
extern int cio_clear (struct subchannel *, unsigned long); extern int cio_clear (struct subchannel *);
extern int cio_do_io (struct subchannel *, struct ccw1 *, unsigned long, __u8);
extern int cio_resume (struct subchannel *); extern int cio_resume (struct subchannel *);
extern int cio_halt (struct subchannel *, unsigned long); extern int cio_halt (struct subchannel *);
extern int cio_start (struct subchannel *, struct ccw1 *, unsigned long, __u8); extern int cio_start (struct subchannel *, struct ccw1 *, unsigned int, __u8);
extern int cio_cancel (struct subchannel *); extern int cio_cancel (struct subchannel *);
extern int cio_set_options (struct subchannel *, int); extern int cio_set_options (struct subchannel *, int);
extern int cio_get_options (struct subchannel *); extern int cio_get_options (struct subchannel *);
......
/* /*
* drivers/s390/cio/css.c * drivers/s390/cio/css.c
* driver for channel subsystem * driver for channel subsystem
* $Revision: 1.40 $ * $Revision: 1.43 $
* *
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation * IBM Corporation
...@@ -41,7 +41,7 @@ css_alloc_subchannel(int irq) ...@@ -41,7 +41,7 @@ css_alloc_subchannel(int irq)
/* There already is a struct subchannel for this irq. */ /* There already is a struct subchannel for this irq. */
return -EBUSY; return -EBUSY;
sch = kmalloc (sizeof (*sch), GFP_DMA); sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
if (sch == NULL) if (sch == NULL)
return -ENOMEM; return -ENOMEM;
ret = cio_validate_subchannel (sch, irq); ret = cio_validate_subchannel (sch, irq);
...@@ -161,7 +161,7 @@ css_process_crw(int irq) ...@@ -161,7 +161,7 @@ css_process_crw(int irq)
sch = ioinfo[irq]; sch = ioinfo[irq];
if (sch == NULL) { if (sch == NULL) {
schedule_work(&work); queue_work(ccw_device_work, &work);
return; return;
} }
if (!sch->dev.driver_data) if (!sch->dev.driver_data)
...@@ -172,7 +172,7 @@ css_process_crw(int irq) ...@@ -172,7 +172,7 @@ css_process_crw(int irq)
ccode = stsch(irq, &sch->schib); ccode = stsch(irq, &sch->schib);
if (!ccode) if (!ccode)
if (devno != sch->schib.pmcw.dev) if (devno != sch->schib.pmcw.dev)
schedule_work(&work); queue_work(ccw_device_work, &work);
} }
/* /*
......
...@@ -79,6 +79,7 @@ struct ccw_device_private { ...@@ -79,6 +79,7 @@ struct ccw_device_private {
unsigned int esid:1; /* Ext. SenseID supported by HW */ unsigned int esid:1; /* Ext. SenseID supported by HW */
unsigned int dosense:1; /* delayed SENSE required */ unsigned int dosense:1; /* delayed SENSE required */
} __attribute__((packed)) flags; } __attribute__((packed)) flags;
unsigned long intparm; /* user interruption parameter */
struct qdio_irq *qdio_data; struct qdio_irq *qdio_data;
struct irb irb; /* device status */ struct irb irb; /* device status */
struct senseid senseid; /* SenseID info */ struct senseid senseid; /* SenseID info */
......
/* /*
* drivers/s390/cio/device.c * drivers/s390/cio/device.c
* bus driver for ccw devices * bus driver for ccw devices
* $Revision: 1.50 $ * $Revision: 1.53 $
* *
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation * IBM Corporation
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/workqueue.h>
#include <asm/ccwdev.h> #include <asm/ccwdev.h>
#include <asm/cio.h> #include <asm/cio.h>
...@@ -126,14 +127,32 @@ static struct css_driver io_subchannel_driver = { ...@@ -126,14 +127,32 @@ static struct css_driver io_subchannel_driver = {
.irq = io_subchannel_irq, .irq = io_subchannel_irq,
}; };
struct workqueue_struct *ccw_device_work;
static wait_queue_head_t ccw_device_init_wq;
static atomic_t ccw_device_init_count;
static int __init static int __init
init_ccw_bus_type (void) init_ccw_bus_type (void)
{ {
int ret; int ret;
init_waitqueue_head(&ccw_device_init_wq);
atomic_set(&ccw_device_init_count, 0);
ccw_device_work = create_workqueue("cio");
if (!ccw_device_work)
return -ENOMEM; /* FIXME: better errno ? */
if ((ret = bus_register (&ccw_bus_type))) if ((ret = bus_register (&ccw_bus_type)))
return ret; return ret;
return driver_register(&io_subchannel_driver.drv); if ((ret = driver_register(&io_subchannel_driver.drv)))
return ret;
wait_event(ccw_device_init_wq,
atomic_read(&ccw_device_init_count) == 0);
flush_workqueue(ccw_device_work);
return 0;
} }
static void __exit static void __exit
...@@ -141,6 +160,7 @@ cleanup_ccw_bus_type (void) ...@@ -141,6 +160,7 @@ cleanup_ccw_bus_type (void)
{ {
driver_unregister(&io_subchannel_driver.drv); driver_unregister(&io_subchannel_driver.drv);
bus_unregister(&ccw_bus_type); bus_unregister(&ccw_bus_type);
destroy_workqueue(ccw_device_work);
} }
subsys_initcall(init_ccw_bus_type); subsys_initcall(init_ccw_bus_type);
...@@ -360,7 +380,7 @@ ccw_device_release(struct device *dev) ...@@ -360,7 +380,7 @@ ccw_device_release(struct device *dev)
/* /*
* Register recognized device. * Register recognized device.
*/ */
void static void
io_subchannel_register(void *data) io_subchannel_register(void *data)
{ {
struct ccw_device *cdev; struct ccw_device *cdev;
...@@ -389,6 +409,42 @@ io_subchannel_register(void *data) ...@@ -389,6 +409,42 @@ io_subchannel_register(void *data)
put_device(&sch->dev); put_device(&sch->dev);
} }
/*
* subchannel recognition done. Called from the state machine.
*/
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
struct subchannel *sch;
if (css_init_done == 0)
return;
switch (cdev->private->state) {
case DEV_STATE_NOT_OPER:
/* Remove device found not operational. */
sch = to_subchannel(cdev->dev.parent);
sch->dev.driver_data = 0;
put_device(&sch->dev);
if (cdev->dev.release)
cdev->dev.release(&cdev->dev);
break;
case DEV_STATE_OFFLINE:
/*
* We can't register the device in interrupt context so
* we schedule a work item.
*/
INIT_WORK(&cdev->private->kick_work,
io_subchannel_register, (void *) cdev);
queue_work(ccw_device_work, &cdev->private->kick_work);
break;
case DEV_STATE_BOXED:
/* Device did not respond in time. */
break;
}
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
}
static void static void
io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{ {
...@@ -419,6 +475,9 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) ...@@ -419,6 +475,9 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
/* Do first half of device_register. */ /* Do first half of device_register. */
device_initialize(&cdev->dev); device_initialize(&cdev->dev);
/* Increase counter of devices currently in recognition. */
atomic_inc(&ccw_device_init_count);
/* Start async. device sensing. */ /* Start async. device sensing. */
spin_lock_irq(cdev->ccwlock); spin_lock_irq(cdev->ccwlock);
rc = ccw_device_recognition(cdev); rc = ccw_device_recognition(cdev);
...@@ -428,6 +487,8 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) ...@@ -428,6 +487,8 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
put_device(&sch->dev); put_device(&sch->dev);
if (cdev->dev.release) if (cdev->dev.release)
cdev->dev.release(&cdev->dev); cdev->dev.release(&cdev->dev);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
} }
} }
...@@ -452,7 +513,8 @@ io_subchannel_probe (struct device *pdev) ...@@ -452,7 +513,8 @@ io_subchannel_probe (struct device *pdev)
if (!cdev) if (!cdev)
return -ENOMEM; return -ENOMEM;
memset(cdev, 0, sizeof(struct ccw_device)); memset(cdev, 0, sizeof(struct ccw_device));
cdev->private = kmalloc(sizeof(struct ccw_device_private), GFP_DMA); cdev->private = kmalloc(sizeof(struct ccw_device_private),
GFP_KERNEL | GFP_DMA);
if (!cdev->private) { if (!cdev->private) {
kfree(cdev); kfree(cdev);
return -ENOMEM; return -ENOMEM;
......
...@@ -14,13 +14,11 @@ enum dev_state { ...@@ -14,13 +14,11 @@ enum dev_state {
DEV_STATE_W4SENSE, DEV_STATE_W4SENSE,
DEV_STATE_DISBAND_PGID, DEV_STATE_DISBAND_PGID,
DEV_STATE_BOXED, DEV_STATE_BOXED,
/* special states for qdio */
DEV_STATE_QDIO_INIT,
DEV_STATE_QDIO_ACTIVE,
DEV_STATE_QDIO_CLEANUP,
/* states to wait for i/o completion before doing something */ /* states to wait for i/o completion before doing something */
DEV_STATE_ONLINE_VERIFY, DEV_STATE_ONLINE_VERIFY,
DEV_STATE_W4SENSE_VERIFY, DEV_STATE_W4SENSE_VERIFY,
DEV_STATE_CLEAR_VERIFY,
DEV_STATE_TIMEOUT_KILL,
/* last element! */ /* last element! */
NR_DEV_STATES NR_DEV_STATES
}; };
...@@ -63,7 +61,9 @@ dev_fsm_final_state(struct ccw_device *cdev) ...@@ -63,7 +61,9 @@ dev_fsm_final_state(struct ccw_device *cdev)
cdev->private->state == DEV_STATE_BOXED); cdev->private->state == DEV_STATE_BOXED);
} }
void io_subchannel_register(void *data); extern struct workqueue_struct *ccw_device_work;
void io_subchannel_recog_done(struct ccw_device *cdev);
int ccw_device_recognition(struct ccw_device *); int ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *); int ccw_device_online(struct ccw_device *);
......
...@@ -81,14 +81,14 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) ...@@ -81,14 +81,14 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
/* Stage 2: halt io. */ /* Stage 2: halt io. */
while (cdev->private->iretry-- > 0) while (cdev->private->iretry-- > 0)
if (cio_halt (sch, 0xC8C1D3E3) == 0) if (cio_halt (sch) == 0)
return -EBUSY; return -EBUSY;
/* halt io unsuccessful. */ /* halt io unsuccessful. */
cdev->private->iretry = 255; /* 255 clear retries. */ cdev->private->iretry = 255; /* 255 clear retries. */
} }
/* Stage 3: clear io. */ /* Stage 3: clear io. */
while (cdev->private->iretry-- > 0) while (cdev->private->iretry-- > 0)
if (cio_clear (sch, 0x40C3D3D9) == 0) if (cio_clear (sch) == 0)
return -EBUSY; return -EBUSY;
panic("Can't stop i/o on subchannel.\n"); panic("Can't stop i/o on subchannel.\n");
} }
...@@ -112,10 +112,6 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) ...@@ -112,10 +112,6 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
CIO_DEBUG(KERN_WARNING, 2, CIO_DEBUG(KERN_WARNING, 2,
"SenseID : unknown device %04X on subchannel %04X\n", "SenseID : unknown device %04X on subchannel %04X\n",
sch->schib.pmcw.dev, sch->irq); sch->schib.pmcw.dev, sch->irq);
sch->dev.driver_data = 0;
put_device(&sch->dev);
if (cdev->dev.release)
cdev->dev.release(&cdev->dev);
break; break;
case DEV_STATE_OFFLINE: case DEV_STATE_OFFLINE:
/* fill out sense information */ /* fill out sense information */
...@@ -131,11 +127,6 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) ...@@ -131,11 +127,6 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
"%04X/%02X\n", sch->schib.pmcw.dev, "%04X/%02X\n", sch->schib.pmcw.dev,
cdev->id.cu_type, cdev->id.cu_model, cdev->id.cu_type, cdev->id.cu_model,
cdev->id.dev_type, cdev->id.dev_model); cdev->id.dev_type, cdev->id.dev_model);
if (css_init_done == 0)
break;
INIT_WORK(&cdev->private->kick_work,
io_subchannel_register, (void *) cdev);
schedule_work(&cdev->private->kick_work);
break; break;
case DEV_STATE_BOXED: case DEV_STATE_BOXED:
CIO_DEBUG(KERN_WARNING, 2, CIO_DEBUG(KERN_WARNING, 2,
...@@ -143,6 +134,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) ...@@ -143,6 +134,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
sch->schib.pmcw.dev, sch->irq); sch->schib.pmcw.dev, sch->irq);
break; break;
} }
io_subchannel_recog_done(cdev);
wake_up(&cdev->private->wait_q); wake_up(&cdev->private->wait_q);
} }
...@@ -219,9 +211,6 @@ ccw_device_recognition(struct ccw_device *cdev) ...@@ -219,9 +211,6 @@ ccw_device_recognition(struct ccw_device *cdev)
static void static void
ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{ {
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
if (ccw_device_cancel_halt_clear(cdev) == 0) if (ccw_device_cancel_halt_clear(cdev) == 0)
ccw_device_recog_done(cdev, DEV_STATE_BOXED); ccw_device_recog_done(cdev, DEV_STATE_BOXED);
else else
...@@ -349,9 +338,6 @@ ccw_device_offline(struct ccw_device *cdev) ...@@ -349,9 +338,6 @@ ccw_device_offline(struct ccw_device *cdev)
static void static void
ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{ {
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
if (ccw_device_cancel_halt_clear(cdev) == 0) if (ccw_device_cancel_halt_clear(cdev) == 0)
ccw_device_done(cdev, DEV_STATE_BOXED); ccw_device_done(cdev, DEV_STATE_BOXED);
else else
...@@ -393,8 +379,8 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event) ...@@ -393,8 +379,8 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
// FIXME: not-oper indication to device driver ? // FIXME: not-oper indication to device driver ?
ccw_device_call_handler(cdev); ccw_device_call_handler(cdev);
} }
device_unregister(&cdev->dev);
wake_up(&cdev->private->wait_q); wake_up(&cdev->private->wait_q);
device_unregister(&cdev->dev);
} }
/* /*
...@@ -438,14 +424,39 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) ...@@ -438,14 +424,39 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
/* Accumulate status and find out if a basic sense is needed. */ /* Accumulate status and find out if a basic sense is needed. */
ccw_device_accumulate_irb(cdev, irb); ccw_device_accumulate_irb(cdev, irb);
if (cdev->private->flags.dosense) { if (cdev->private->flags.dosense) {
if (ccw_device_do_sense(cdev, irb) == 0) if (ccw_device_do_sense(cdev, irb) == 0) {
cdev->private->state = DEV_STATE_W4SENSE; /* Check if we have to trigger path verification. */
if (irb->esw.esw0.erw.pvrf)
cdev->private->state = DEV_STATE_W4SENSE_VERIFY;
else
cdev->private->state = DEV_STATE_W4SENSE;
}
return; return;
} }
if (irb->esw.esw0.erw.pvrf)
/* Try to start path verification. */
ccw_device_online_verify(cdev, 0);
/* No basic sense required, call the handler. */ /* No basic sense required, call the handler. */
ccw_device_call_handler(cdev); ccw_device_call_handler(cdev);
} }
/*
* Got an timeout in online state.
*/
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
ccw_device_set_timeout(cdev, 0);
if (ccw_device_cancel_halt_clear(cdev) != 0) {
ccw_device_set_timeout(cdev, 3*HZ);
cdev->private->state = DEV_STATE_TIMEOUT_KILL;
return;
}
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-ETIMEDOUT));
}
static void static void
ccw_device_irq_verify(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_irq_verify(struct ccw_device *cdev, enum dev_event dev_event)
{ {
...@@ -491,11 +502,17 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) ...@@ -491,11 +502,17 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
/* Add basic sense info to irb. */ /* Add basic sense info to irb. */
ccw_device_accumulate_basic_sense(cdev, irb); ccw_device_accumulate_basic_sense(cdev, irb);
if (cdev->private->flags.dosense) { if (cdev->private->flags.dosense) {
/* Check if we have to trigger path verification. */
if (irb->esw.esw0.erw.pvrf)
cdev->private->state = DEV_STATE_W4SENSE_VERIFY;
/* Another basic sense is needed. */ /* Another basic sense is needed. */
ccw_device_do_sense(cdev, irb); ccw_device_do_sense(cdev, irb);
return; return;
} }
cdev->private->state = DEV_STATE_ONLINE; cdev->private->state = DEV_STATE_ONLINE;
if (irb->esw.esw0.erw.pvrf)
/* Try to start path verification. */
ccw_device_online_verify(cdev, 0);
/* Call the handler. */ /* Call the handler. */
ccw_device_call_handler(cdev); ccw_device_call_handler(cdev);
} }
...@@ -527,103 +544,68 @@ ccw_device_w4sense_verify(struct ccw_device *cdev, enum dev_event dev_event) ...@@ -527,103 +544,68 @@ ccw_device_w4sense_verify(struct ccw_device *cdev, enum dev_event dev_event)
ccw_device_call_handler(cdev); ccw_device_call_handler(cdev);
} }
/*
* No operation action. This is used e.g. to ignore a timeout event in
* state offline.
*/
static void static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
}
/*
* Bug operation action.
*/
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
cdev->private->state, dev_event);
BUG();
}
/*
* We've got an interrupt on establish queues. Check for errors and
* accordingly retry or move on.
*/
static void
ccw_device_qdio_init_irq(struct ccw_device *cdev, enum dev_event dev_event)
{ {
struct irb *irb; struct irb *irb;
struct subchannel *sch;
irb = (struct irb *) __LC_IRB; irb = (struct irb *) __LC_IRB;
/* Check for unsolicited interrupt. */ /* Check for unsolicited interrupt. */
if (irb->scsw.stctl == if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (cdev->private->qdio_data && if (cdev->handler)
cdev->private->qdio_data->establish_irq) cdev->handler (cdev, 0, irb);
cdev->private->qdio_data->establish_irq(cdev, 0, irb);
wake_up(&cdev->private->wait_q);
return; return;
} }
/* Accumulate status. We don't do basic sense. */
ccw_device_accumulate_irb(cdev, irb); ccw_device_accumulate_irb(cdev, irb);
//FIXME: Basic sense? /* Try to start delayed device verification. */
sch = to_subchannel(cdev->dev.parent); ccw_device_online_verify(cdev, 0);
if (cdev->private->qdio_data && cdev->private->qdio_data->establish_irq) /* Note: Don't call handler for cio initiated clear! */
cdev->private->qdio_data->establish_irq(cdev, sch->u_intparm,
&cdev->private->irb);
wake_up(&cdev->private->wait_q);
} }
/*
* Run into a timeout after establish queues, retry if needed.
*/
static void static void
ccw_device_qdio_init_timeout(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{ {
ccw_device_set_timeout(cdev, 0); /* OK, i/o is dead now. Call interrupt handler. */
if (cdev->private->qdio_data && cdev->private->state = DEV_STATE_ONLINE;
cdev->private->qdio_data->establish_timeout) if (cdev->handler)
cdev->private->qdio_data->establish_timeout(cdev); cdev->handler(cdev, cdev->private->intparm,
wake_up(&cdev->private->wait_q); ERR_PTR(-ETIMEDOUT));
} }
static void static void
ccw_device_qdio_cleanup_irq(struct ccw_device *cdev, ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
enum dev_event dev_event)
{ {
struct irb *irb; if (ccw_device_cancel_halt_clear(cdev) != 0) {
struct subchannel *sch; ccw_device_set_timeout(cdev, 3*HZ);
irb = (struct irb *) __LC_IRB;
/* Check for unsolicited interrupt. */
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (cdev->private->qdio_data &&
cdev->private->qdio_data->cleanup_irq)
cdev->private->qdio_data->cleanup_irq(cdev, 0, irb);
wake_up(&cdev->private->wait_q);
return; return;
} }
ccw_device_accumulate_irb(cdev, irb); //FIXME: Can we get here?
//FIXME: Basic sense? cdev->private->state = DEV_STATE_ONLINE;
sch = to_subchannel(cdev->dev.parent); if (cdev->handler)
if (cdev->private->qdio_data && cdev->private->qdio_data->cleanup_irq) cdev->handler(cdev, cdev->private->intparm,
cdev->private->qdio_data->cleanup_irq(cdev, sch->u_intparm, ERR_PTR(-ETIMEDOUT));
&cdev->private->irb);
wake_up(&cdev->private->wait_q);
} }
/*
* No operation action. This is used e.g. to ignore a timeout event in
* state offline.
*/
static void static void
ccw_device_qdio_cleanup_timeout(struct ccw_device *cdev, ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
enum dev_event dev_event)
{ {
ccw_device_set_timeout(cdev, 0); }
if (cdev->private->qdio_data &&
cdev->private->qdio_data->cleanup_timeout) /*
cdev->private->qdio_data->cleanup_timeout(cdev); * Bug operation action.
wake_up(&cdev->private->wait_q); */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
cdev->private->state, dev_event);
BUG();
} }
/* /*
...@@ -663,7 +645,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { ...@@ -663,7 +645,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_STATE_ONLINE] { [DEV_STATE_ONLINE] {
[DEV_EVENT_NOTOPER] ccw_device_online_notoper, [DEV_EVENT_NOTOPER] ccw_device_online_notoper,
[DEV_EVENT_INTERRUPT] ccw_device_irq, [DEV_EVENT_INTERRUPT] ccw_device_irq,
[DEV_EVENT_TIMEOUT] ccw_device_nop, [DEV_EVENT_TIMEOUT] ccw_device_online_timeout,
[DEV_EVENT_VERIFY] ccw_device_online_verify, [DEV_EVENT_VERIFY] ccw_device_online_verify,
}, },
[DEV_STATE_W4SENSE] { [DEV_STATE_W4SENSE] {
...@@ -684,25 +666,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { ...@@ -684,25 +666,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_TIMEOUT] ccw_device_nop, [DEV_EVENT_TIMEOUT] ccw_device_nop,
[DEV_EVENT_VERIFY] ccw_device_nop, [DEV_EVENT_VERIFY] ccw_device_nop,
}, },
/* special states for qdio */
[DEV_STATE_QDIO_INIT] {
[DEV_EVENT_NOTOPER] ccw_device_online_notoper,
[DEV_EVENT_INTERRUPT] ccw_device_qdio_init_irq,
[DEV_EVENT_TIMEOUT] ccw_device_qdio_init_timeout,
[DEV_EVENT_VERIFY] ccw_device_nop,
},
[DEV_STATE_QDIO_ACTIVE] {
[DEV_EVENT_NOTOPER] ccw_device_online_notoper,
[DEV_EVENT_INTERRUPT] ccw_device_irq,
[DEV_EVENT_TIMEOUT] ccw_device_nop,
[DEV_EVENT_VERIFY] ccw_device_nop,
},
[DEV_STATE_QDIO_CLEANUP] {
[DEV_EVENT_NOTOPER] ccw_device_online_notoper,
[DEV_EVENT_INTERRUPT] ccw_device_qdio_cleanup_irq,
[DEV_EVENT_TIMEOUT] ccw_device_qdio_cleanup_timeout,
[DEV_EVENT_VERIFY] ccw_device_nop,
},
/* states to wait for i/o completion before doing something */ /* states to wait for i/o completion before doing something */
[DEV_STATE_ONLINE_VERIFY] { [DEV_STATE_ONLINE_VERIFY] {
[DEV_EVENT_NOTOPER] ccw_device_online_notoper, [DEV_EVENT_NOTOPER] ccw_device_online_notoper,
...@@ -716,6 +679,18 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { ...@@ -716,6 +679,18 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_TIMEOUT] ccw_device_nop, [DEV_EVENT_TIMEOUT] ccw_device_nop,
[DEV_EVENT_VERIFY] ccw_device_nop, [DEV_EVENT_VERIFY] ccw_device_nop,
}, },
[DEV_STATE_CLEAR_VERIFY] {
[DEV_EVENT_NOTOPER] ccw_device_online_notoper,
[DEV_EVENT_INTERRUPT] ccw_device_clear_verify,
[DEV_EVENT_TIMEOUT] ccw_device_nop,
[DEV_EVENT_VERIFY] ccw_device_nop,
},
[DEV_STATE_TIMEOUT_KILL] {
[DEV_EVENT_NOTOPER] ccw_device_online_notoper,
[DEV_EVENT_INTERRUPT] ccw_device_killing_irq,
[DEV_EVENT_TIMEOUT] ccw_device_killing_timeout,
[DEV_EVENT_VERIFY] ccw_device_nop, //FIXME
},
}; };
/* /*
...@@ -736,3 +711,4 @@ io_subchannel_irq (struct device *pdev) ...@@ -736,3 +711,4 @@ io_subchannel_irq (struct device *pdev)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
} }
EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
@@ -198,11 +198,13 @@ __ccw_device_sense_id_start(struct ccw_device *cdev)
             /* 0x00E2C9C4 == ebcdic "SID" */
             ret = cio_start (sch, cdev->private->iccws,
                              0x00E2C9C4, cdev->private->imask);
-            /* ret is 0, -EBUSY or -ENODEV */
-            if (ret != -EBUSY)
+            /* ret is 0, -EBUSY, -EACCES or -ENODEV */
+            if (ret == -EBUSY) {
+                udelay(100);
+                continue;
+            }
+            if (ret != -EACCES)
                 return ret;
-            udelay(100);
-            continue;
         }
         cdev->private->imask >>= 1;
         cdev->private->iretry = 5;
......
...@@ -49,12 +49,15 @@ ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) ...@@ -49,12 +49,15 @@ ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
return -ENODEV; return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE && if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE && cdev->private->state != DEV_STATE_W4SENSE &&
cdev->private->state != DEV_STATE_QDIO_ACTIVE) cdev->private->state != DEV_STATE_ONLINE_VERIFY &&
cdev->private->state != DEV_STATE_W4SENSE_VERIFY)
return -EINVAL; return -EINVAL;
sch = to_subchannel(cdev->dev.parent); sch = to_subchannel(cdev->dev.parent);
if (!sch) if (!sch)
return -ENODEV; return -ENODEV;
ret = cio_clear(sch, intparm); ret = cio_clear(sch);
if (ret == 0)
cdev->private->intparm = intparm;
return ret; return ret;
} }
@@ -67,17 +70,35 @@ ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
     if (!cdev)
         return -ENODEV;
-    if (cdev->private->state != DEV_STATE_ONLINE &&
-        cdev->private->state != DEV_STATE_W4SENSE &&
-        cdev->private->state != DEV_STATE_QDIO_INIT)
-        return -EINVAL;
     sch = to_subchannel(cdev->dev.parent);
     if (!sch)
         return -ENODEV;
+    if (cdev->private->state != DEV_STATE_ONLINE ||
+        sch->schib.scsw.actl != 0)
+        return -EBUSY;
     ret = cio_set_options (sch, flags);
     if (ret)
         return ret;
-    ret = cio_start (sch, cpa, intparm, lpm);
+    /* 0xe4e2c5d9 == ebcdic "USER" */
+    ret = cio_start (sch, cpa, 0xe4e2c5d9, lpm);
+    if (ret == 0)
+        cdev->private->intparm = intparm;
+    return ret;
+}
+
+int
+ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
+                         unsigned long intparm, __u8 lpm, unsigned long flags,
+                         int expires)
+{
+    int ret;
+
+    if (!cdev)
+        return -ENODEV;
+    ccw_device_set_timeout(cdev, expires);
+    ret = ccw_device_start(cdev, cpa, intparm, lpm, flags);
+    if (ret != 0)
+        ccw_device_set_timeout(cdev, 0);
     return ret;
 }
...@@ -90,12 +111,16 @@ ccw_device_halt(struct ccw_device *cdev, unsigned long intparm) ...@@ -90,12 +111,16 @@ ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
if (!cdev) if (!cdev)
return -ENODEV; return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE && if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE) cdev->private->state != DEV_STATE_W4SENSE &&
cdev->private->state != DEV_STATE_ONLINE_VERIFY &&
cdev->private->state != DEV_STATE_W4SENSE_VERIFY)
return -EINVAL; return -EINVAL;
sch = to_subchannel(cdev->dev.parent); sch = to_subchannel(cdev->dev.parent);
if (!sch) if (!sch)
return -ENODEV; return -ENODEV;
ret = cio_halt(sch, intparm); ret = cio_halt(sch);
if (ret == 0)
cdev->private->intparm = intparm;
return ret; return ret;
} }
...@@ -106,12 +131,12 @@ ccw_device_resume(struct ccw_device *cdev) ...@@ -106,12 +131,12 @@ ccw_device_resume(struct ccw_device *cdev)
if (!cdev) if (!cdev)
return -ENODEV; return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent); sch = to_subchannel(cdev->dev.parent);
if (!sch) if (!sch)
return -ENODEV; return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE ||
!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
return -EINVAL;
return cio_resume(sch); return cio_resume(sch);
} }
...@@ -123,15 +148,6 @@ ccw_device_call_handler(struct ccw_device *cdev) ...@@ -123,15 +148,6 @@ ccw_device_call_handler(struct ccw_device *cdev)
{ {
struct subchannel *sch; struct subchannel *sch;
unsigned int stctl; unsigned int stctl;
void (*handler)(struct ccw_device *, unsigned long, struct irb *);
if (cdev->private->state == DEV_STATE_QDIO_ACTIVE) {
if (cdev->private->qdio_data)
handler = cdev->private->qdio_data->handler;
else
handler = NULL;
} else
handler = cdev->handler;
sch = to_subchannel(cdev->dev.parent); sch = to_subchannel(cdev->dev.parent);
...@@ -154,8 +170,9 @@ ccw_device_call_handler(struct ccw_device *cdev) ...@@ -154,8 +170,9 @@ ccw_device_call_handler(struct ccw_device *cdev)
/* /*
* Now we are ready to call the device driver interrupt handler. * Now we are ready to call the device driver interrupt handler.
*/ */
if (handler) if (cdev->handler)
handler(cdev, sch->u_intparm, &cdev->private->irb); cdev->handler(cdev, cdev->private->intparm,
&cdev->private->irb);
/* /*
* Clear the old and now useless interrupt response block. * Clear the old and now useless interrupt response block.
...@@ -192,6 +209,11 @@ ccw_device_get_path_mask(struct ccw_device *cdev) ...@@ -192,6 +209,11 @@ ccw_device_get_path_mask(struct ccw_device *cdev)
static void static void
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
{ {
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
if (!IS_ERR(irb))
memcpy(&sch->schib.scsw, &irb->scsw, sizeof(struct scsw));
wake_up(&cdev->private->wait_q); wake_up(&cdev->private->wait_q);
} }
...@@ -218,8 +240,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length) ...@@ -218,8 +240,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
if (!cdev) if (!cdev)
return -ENODEV; return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE && if (cdev->private->state != DEV_STATE_ONLINE)
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL; return -EINVAL;
if (!buffer || !length) if (!buffer || !length)
return -EINVAL; return -EINVAL;
...@@ -251,7 +272,13 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length) ...@@ -251,7 +272,13 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
wait_event(cdev->private->wait_q, wait_event(cdev->private->wait_q,
sch->schib.scsw.actl == 0); sch->schib.scsw.actl == 0);
spin_lock_irqsave(&sch->lock, flags); spin_lock_irqsave(&sch->lock, flags);
/* FIXME: Check if we got sensible stuff. */ /* Check at least for channel end / device end */
if ((sch->schib.scsw.dstat !=
(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
(sch->schib.scsw.cstat != 0)) {
ret = -EIO;
continue;
}
break; break;
} }
} }
...@@ -281,8 +308,7 @@ read_conf_data (struct ccw_device *cdev, void **buffer, int *length) ...@@ -281,8 +308,7 @@ read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
if (!cdev) if (!cdev)
return -ENODEV; return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE && if (cdev->private->state != DEV_STATE_ONLINE)
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL; return -EINVAL;
if (cdev->private->flags.esid == 0) if (cdev->private->flags.esid == 0)
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -300,7 +326,7 @@ read_conf_data (struct ccw_device *cdev, void **buffer, int *length) ...@@ -300,7 +326,7 @@ read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
if (!ciw || ciw->cmd == 0) if (!ciw || ciw->cmd == 0)
return -EOPNOTSUPP; return -EOPNOTSUPP;
rcd_buf = kmalloc(ciw->count, GFP_DMA); rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA);
if (!rcd_buf) if (!rcd_buf)
return -ENOMEM; return -ENOMEM;
memset (rcd_buf, 0, ciw->count); memset (rcd_buf, 0, ciw->count);
...@@ -325,7 +351,13 @@ read_conf_data (struct ccw_device *cdev, void **buffer, int *length) ...@@ -325,7 +351,13 @@ read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
spin_unlock_irqrestore(&sch->lock, flags); spin_unlock_irqrestore(&sch->lock, flags);
wait_event(cdev->private->wait_q, sch->schib.scsw.actl == 0); wait_event(cdev->private->wait_q, sch->schib.scsw.actl == 0);
spin_lock_irqsave(&sch->lock, flags); spin_lock_irqsave(&sch->lock, flags);
/* FIXME: Check if we got sensible stuff. */ /* Check at least for channel end / device end */
if ((sch->schib.scsw.dstat !=
(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
(sch->schib.scsw.cstat != 0)) {
ret = -EIO;
continue;
}
break; break;
} }
/* Restore interrupt handler. */ /* Restore interrupt handler. */
...@@ -363,13 +395,15 @@ _ccw_device_get_device_number(struct ccw_device *cdev) ...@@ -363,13 +395,15 @@ _ccw_device_get_device_number(struct ccw_device *cdev)
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear); EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt); EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume); EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start); EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_get_ciw); EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask); EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL (read_conf_data); EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL (read_dev_chars); EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number); EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number); EXPORT_SYMBOL(_ccw_device_get_device_number);
...@@ -51,14 +51,23 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev) ...@@ -51,14 +51,23 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
/* 0xe2d5c9c4 == ebcdic "SNID" */ /* 0xe2d5c9c4 == ebcdic "SNID" */
ret = cio_start (sch, cdev->private->iccws, ret = cio_start (sch, cdev->private->iccws,
0xE2D5C9C4, cdev->private->imask); 0xE2D5C9C4, cdev->private->imask);
/* ret is 0, -EBUSY or -ENODEV */ /* ret is 0, -EBUSY, -EACCES or -ENODEV */
if (ret != -EBUSY) if (ret == -EBUSY) {
CIO_MSG_EVENT(2,
"SNID - device %04X, start_io() "
"reports rc : %d, retrying ...\n",
sch->schib.pmcw.dev, ret);
udelay(100);
continue;
}
if (ret != -EACCES)
return ret; return ret;
CIO_MSG_EVENT(2, "SNID - device %04X, start_io() " CIO_MSG_EVENT(2, "SNID - Device %04X on Subchannel "
"reports rc : %d, retrying ...\n", "%04X, lpm %02X, became 'not "
sch->schib.pmcw.dev, ret); "operational'\n",
udelay(100); sch->schib.pmcw.dev, sch->irq,
continue; cdev->private->imask);
} }
cdev->private->imask >>= 1; cdev->private->imask >>= 1;
cdev->private->iretry = 5; cdev->private->iretry = 5;
...@@ -231,7 +240,9 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func) ...@@ -231,7 +240,9 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
/* 0xE2D7C9C4 == ebcdic "SPID" */ /* 0xE2D7C9C4 == ebcdic "SPID" */
ret = cio_start (sch, cdev->private->iccws, ret = cio_start (sch, cdev->private->iccws,
0xE2D7C9C4, cdev->private->imask); 0xE2D7C9C4, cdev->private->imask);
/* ret is 0, -EBUSY or -ENODEV */ /* ret is 0, -EBUSY, -EACCES or -ENODEV */
if (ret == -EACCES)
break;
if (ret != -EBUSY) if (ret != -EBUSY)
return ret; return ret;
udelay(100); udelay(100);
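With the path-no-operational fix, cio_start() can also return -EACCES when the requested logical path mask does not lead to an operational path; the PGID routines above either step to the next path (SNID) or break out (SPID). A stripped-down sketch of the per-path retry pattern (the intparm tag and the error policy are simplified assumptions, not the exact cio logic):

/* Try a channel program once per path, most significant path bit first. */
static int example_try_all_paths(struct subchannel *sch, struct ccw1 *cp)
{
	__u8 lpm;
	int ret = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		ret = cio_start(sch, cp, 0x0001 /* arbitrary tag */, lpm);
		if (ret != -EACCES)
			/* 0, -EBUSY or -ENODEV: stop in this simplified sketch */
			break;
		/* -EACCES: this path is not operational, try the next one. */
	}
	return ret;
}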
......
...@@ -167,7 +167,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) ...@@ -167,7 +167,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
/* Copy authorization bit. */ /* Copy authorization bit. */
cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth; cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
/* Copy path verification required flag. FIXME: how to verify ?? */ /* Copy path verification required flag. */
cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf; cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
/* Copy concurrent sense bit. */ /* Copy concurrent sense bit. */
cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons; cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
...@@ -309,7 +309,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) ...@@ -309,7 +309,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
sch->sense_ccw.flags = CCW_FLAG_SLI; sch->sense_ccw.flags = CCW_FLAG_SLI;
/* 0xe2C5D5E2 == "SENS" in ebcdic */ /* 0xe2C5D5E2 == "SENS" in ebcdic */
return cio_start (sch, &sch->sense_ccw, 0xE2C5D5E2, 0); return cio_start (sch, &sch->sense_ccw, 0xE2C5D5E2, 0xff);
} }
/* /*
......
...@@ -34,7 +34,6 @@ ...@@ -34,7 +34,6 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/version.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
...@@ -54,8 +53,9 @@ ...@@ -54,8 +53,9 @@
#include "airq.h" #include "airq.h"
#include "qdio.h" #include "qdio.h"
#include "ioasm.h" #include "ioasm.h"
#include "chsc.h"
#define VERSION_QDIO_C "$Revision: 1.34 $" #define VERSION_QDIO_C "$Revision: 1.48 $"
/****************** MODULE PARAMETER VARIABLES ********************/ /****************** MODULE PARAMETER VARIABLES ********************/
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>"); MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
...@@ -87,7 +87,6 @@ static debug_info_t *qdio_dbf_slsb_out; ...@@ -87,7 +87,6 @@ static debug_info_t *qdio_dbf_slsb_out;
static debug_info_t *qdio_dbf_slsb_in; static debug_info_t *qdio_dbf_slsb_in;
#endif /* QDIO_DBF_LIKE_HELL */ #endif /* QDIO_DBF_LIKE_HELL */
static struct qdio_chsc_area *chsc_area;
/* iQDIO stuff: */ /* iQDIO stuff: */
static volatile struct qdio_q *iq_list=NULL; /* volatile as it could change static volatile struct qdio_q *iq_list=NULL; /* volatile as it could change
during a while loop */ during a while loop */
...@@ -611,16 +610,13 @@ inline static int ...@@ -611,16 +610,13 @@ inline static int
iqdio_is_inbound_q_done(struct qdio_q *q) iqdio_is_inbound_q_done(struct qdio_q *q)
{ {
int no_used; int no_used;
#ifdef QDIO_DBF_LIKE_HELL
char dbf_text[15]; char dbf_text[15];
#endif /* QDIO_DBF_LIKE_HELL */
no_used=atomic_read(&q->number_of_buffers_used); no_used=atomic_read(&q->number_of_buffers_used);
/* propagate the change from 82 to 80 through VM */ /* propagate the change from 82 to 80 through VM */
SYNC_MEMORY; SYNC_MEMORY;
#ifdef QDIO_DBF_LIKE_HELL
if (no_used) { if (no_used) {
sprintf(dbf_text,"iqisnt%02x",no_used); sprintf(dbf_text,"iqisnt%02x",no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text); QDIO_DBF_TEXT4(0,trace,dbf_text);
...@@ -628,7 +624,6 @@ iqdio_is_inbound_q_done(struct qdio_q *q) ...@@ -628,7 +624,6 @@ iqdio_is_inbound_q_done(struct qdio_q *q)
QDIO_DBF_TEXT4(0,trace,"iniqisdo"); QDIO_DBF_TEXT4(0,trace,"iniqisdo");
} }
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
#endif /* QDIO_DBF_LIKE_HELL */
if (!no_used) if (!no_used)
return 1; return 1;
...@@ -664,9 +659,7 @@ inline static int ...@@ -664,9 +659,7 @@ inline static int
qdio_is_inbound_q_done(struct qdio_q *q) qdio_is_inbound_q_done(struct qdio_q *q)
{ {
int no_used; int no_used;
#ifdef QDIO_DBF_LIKE_HELL
char dbf_text[15]; char dbf_text[15];
#endif /* QDIO_DBF_LIKE_HELL */
no_used=atomic_read(&q->number_of_buffers_used); no_used=atomic_read(&q->number_of_buffers_used);
...@@ -677,11 +670,9 @@ qdio_is_inbound_q_done(struct qdio_q *q) ...@@ -677,11 +670,9 @@ qdio_is_inbound_q_done(struct qdio_q *q)
SYNC_MEMORY; SYNC_MEMORY;
if (!no_used) { if (!no_used) {
#ifdef QDIO_DBF_LIKE_HELL
QDIO_DBF_TEXT4(0,trace,"inqisdnA"); QDIO_DBF_TEXT4(0,trace,"inqisdnA");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
QDIO_DBF_TEXT4(0,trace,dbf_text); QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* QDIO_DBF_LIKE_HELL */
return 1; return 1;
} }
...@@ -703,20 +694,16 @@ qdio_is_inbound_q_done(struct qdio_q *q) ...@@ -703,20 +694,16 @@ qdio_is_inbound_q_done(struct qdio_q *q)
* has (probably) not moved (see qdio_inbound_processing) * has (probably) not moved (see qdio_inbound_processing)
*/ */
if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) { if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
#ifdef QDIO_DBF_LIKE_HELL
QDIO_DBF_TEXT4(0,trace,"inqisdon"); QDIO_DBF_TEXT4(0,trace,"inqisdon");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used); sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text); QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* QDIO_DBF_LIKE_HELL */
return 1; return 1;
} else { } else {
#ifdef QDIO_DBF_LIKE_HELL
QDIO_DBF_TEXT4(0,trace,"inqisntd"); QDIO_DBF_TEXT4(0,trace,"inqisntd");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used); sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text); QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* QDIO_DBF_LIKE_HELL */
return 0; return 0;
} }
} }
...@@ -725,12 +712,10 @@ inline static void ...@@ -725,12 +712,10 @@ inline static void
qdio_kick_inbound_handler(struct qdio_q *q) qdio_kick_inbound_handler(struct qdio_q *q)
{ {
int count, start, end, real_end, i; int count, start, end, real_end, i;
#ifdef QDIO_DBF_LIKE_HELL
char dbf_text[15]; char dbf_text[15];
QDIO_DBF_TEXT4(0,trace,"kickinh"); QDIO_DBF_TEXT4(0,trace,"kickinh");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
#endif /* QDIO_DBF_LIKE_HELL */
start=q->first_element_to_kick; start=q->first_element_to_kick;
real_end=q->first_to_check; real_end=q->first_to_check;
...@@ -744,10 +729,8 @@ qdio_kick_inbound_handler(struct qdio_q *q) ...@@ -744,10 +729,8 @@ qdio_kick_inbound_handler(struct qdio_q *q)
i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1); i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
} }
#ifdef QDIO_DBF_LIKE_HELL
sprintf(dbf_text,"s=%2xc=%2x",start,count); sprintf(dbf_text,"s=%2xc=%2x",start,count);
QDIO_DBF_TEXT4(0,trace,dbf_text); QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* QDIO_DBF_LIKE_HELL */
if (q->state==QDIO_IRQ_STATE_ACTIVE) if (q->state==QDIO_IRQ_STATE_ACTIVE)
q->handler(q->cdev, q->handler(q->cdev,
...@@ -950,13 +933,10 @@ inline static int ...@@ -950,13 +933,10 @@ inline static int
qdio_is_outbound_q_done(struct qdio_q *q) qdio_is_outbound_q_done(struct qdio_q *q)
{ {
int no_used; int no_used;
#ifdef QDIO_DBF_LIKE_HELL
char dbf_text[15]; char dbf_text[15];
#endif /* QDIO_DBF_LIKE_HELL */
no_used=atomic_read(&q->number_of_buffers_used); no_used=atomic_read(&q->number_of_buffers_used);
#ifdef QDIO_DBF_LIKE_HELL
if (no_used) { if (no_used) {
sprintf(dbf_text,"oqisnt%02x",no_used); sprintf(dbf_text,"oqisnt%02x",no_used);
QDIO_DBF_TEXT4(0,trace,dbf_text); QDIO_DBF_TEXT4(0,trace,dbf_text);
...@@ -964,7 +944,6 @@ qdio_is_outbound_q_done(struct qdio_q *q) ...@@ -964,7 +944,6 @@ qdio_is_outbound_q_done(struct qdio_q *q)
QDIO_DBF_TEXT4(0,trace,"oqisdone"); QDIO_DBF_TEXT4(0,trace,"oqisdone");
} }
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
#endif /* QDIO_DBF_LIKE_HELL */
return (no_used==0); return (no_used==0);
} }
...@@ -1015,10 +994,7 @@ inline static void ...@@ -1015,10 +994,7 @@ inline static void
qdio_kick_outbound_handler(struct qdio_q *q) qdio_kick_outbound_handler(struct qdio_q *q)
{ {
int start, end, real_end, count; int start, end, real_end, count;
#ifdef QDIO_DBF_LIKE_HELL
char dbf_text[15]; char dbf_text[15];
#endif /* QDIO_DBF_LIKE_HELL */
start = q->first_element_to_kick; start = q->first_element_to_kick;
/* last_move_ftc was just updated */ /* last_move_ftc was just updated */
...@@ -1031,10 +1007,8 @@ qdio_kick_outbound_handler(struct qdio_q *q) ...@@ -1031,10 +1007,8 @@ qdio_kick_outbound_handler(struct qdio_q *q)
QDIO_DBF_TEXT4(0,trace,"kickouth"); QDIO_DBF_TEXT4(0,trace,"kickouth");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
#ifdef QDIO_DBF_LIKE_HELL
sprintf(dbf_text,"s=%2xc=%2x",start,count); sprintf(dbf_text,"s=%2xc=%2x",start,count);
QDIO_DBF_TEXT4(0,trace,dbf_text); QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* QDIO_DBF_LIKE_HELL */
if (q->state==QDIO_IRQ_STATE_ACTIVE) if (q->state==QDIO_IRQ_STATE_ACTIVE)
q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT| q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
...@@ -1239,8 +1213,6 @@ qdio_release_irq_memory(struct qdio_irq *irq_ptr) ...@@ -1239,8 +1213,6 @@ qdio_release_irq_memory(struct qdio_irq *irq_ptr)
if (irq_ptr->qdr) if (irq_ptr->qdr)
kfree(irq_ptr->qdr); kfree(irq_ptr->qdr);
kfree(irq_ptr); kfree(irq_ptr);
QDIO_DBF_TEXT3(0,setup,"MOD_DEC_");
MOD_DEC_USE_COUNT;
} }
static void static void
...@@ -1482,7 +1454,7 @@ qdio_fill_thresholds(struct qdio_irq *irq_ptr, ...@@ -1482,7 +1454,7 @@ qdio_fill_thresholds(struct qdio_irq *irq_ptr,
} }
static int static int
iqdio_thinint_handler(__u32 intparm) iqdio_thinint_handler(void)
{ {
QDIO_DBF_TEXT4(0,trace,"thin_int"); QDIO_DBF_TEXT4(0,trace,"thin_int");
...@@ -1500,7 +1472,7 @@ iqdio_thinint_handler(__u32 intparm) ...@@ -1500,7 +1472,7 @@ iqdio_thinint_handler(__u32 intparm)
} }
static void static void
qdio_set_state(struct qdio_irq *irq_ptr,int state) qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
{ {
int i; int i;
char dbf_text[15]; char dbf_text[15];
...@@ -1570,23 +1542,90 @@ qdio_handle_pci(struct qdio_irq *irq_ptr) ...@@ -1570,23 +1542,90 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
} }
} }
static void qdio_establish_handle_irq(struct ccw_device*, int, int);
static inline void
qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
int cstat, int dstat)
{
struct qdio_irq *irq_ptr;
struct qdio_q *q;
char dbf_text[15];
irq_ptr = cdev->private->qdio_data;
QDIO_DBF_TEXT2(1, trace, "ick2");
sprintf(dbf_text,"%s", cdev->dev.bus_id);
QDIO_DBF_TEXT2(1,trace,dbf_text);
QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
QDIO_PRINT_ERR("received check condition on activate " \
"queues on device %s (cs=x%x, ds=x%x).\n",
cdev->dev.bus_id, cstat, dstat);
if (irq_ptr->no_input_qs) {
q=irq_ptr->input_qs[0];
} else if (irq_ptr->no_output_qs) {
q=irq_ptr->output_qs[0];
} else {
QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
cdev->dev.bus_id);
goto omit_handler_call;
}
q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
QDIO_STATUS_LOOK_FOR_ERROR,
0,0,0,-1,-1,q->int_parm);
omit_handler_call:
qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);
}
static void
qdio_timeout_handler(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr;
char dbf_text[15];
QDIO_DBF_TEXT2(0, trace, "qtoh");
sprintf(dbf_text, "%s", cdev->dev.bus_id);
QDIO_DBF_TEXT2(0, trace, dbf_text);
irq_ptr = cdev->private->qdio_data;
sprintf(dbf_text, "state:%d", irq_ptr->state);
QDIO_DBF_TEXT2(0, trace, dbf_text);
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n",
irq_ptr->irq);
QDIO_DBF_TEXT2(1,setup,"eq:timeo");
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
break;
case QDIO_IRQ_STATE_CLEANUP:
QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n",
irq_ptr->irq);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
break;
default:
BUG();
}
wake_up(&cdev->private->wait_q);
}
static void static void
qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{ {
struct qdio_irq *irq_ptr; struct qdio_irq *irq_ptr;
struct qdio_q *q;
int cstat,dstat; int cstat,dstat;
char dbf_text[15]; char dbf_text[15];
cstat = irb->scsw.cstat;
dstat = irb->scsw.dstat;
QDIO_DBF_TEXT4(0, trace, "qint"); QDIO_DBF_TEXT4(0, trace, "qint");
sprintf(dbf_text, "%s", cdev->dev.bus_id); sprintf(dbf_text, "%s", cdev->dev.bus_id);
QDIO_DBF_TEXT4(0, trace, dbf_text); QDIO_DBF_TEXT4(0, trace, dbf_text);
if (!intparm || !cdev) { if (!intparm) {
QDIO_PRINT_STUPID("got unsolicited interrupt in qdio " \ QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
"handler, device %s\n", cdev->dev.bus_id); "handler, device %s\n", cdev->dev.bus_id);
return; return;
} }
...@@ -1601,39 +1640,58 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) ...@@ -1601,39 +1640,58 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
return; return;
} }
if (IS_ERR(irb)) {
/* Currently running i/o is in error. */
switch (PTR_ERR(irb)) {
case -EIO:
QDIO_PRINT_ERR("i/o error on device %s\n",
cdev->dev.bus_id);
//FIXME: hm?
return;
case -ETIMEDOUT:
qdio_timeout_handler(cdev);
return;
default:
QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
PTR_ERR(irb), cdev->dev.bus_id);
return;
}
}
qdio_irq_check_sense(irq_ptr->irq, irb); qdio_irq_check_sense(irq_ptr->irq, irb);
if (cstat & SCHN_STAT_PCI) { sprintf(dbf_text, "state:%d", irq_ptr->state);
qdio_handle_pci(irq_ptr); QDIO_DBF_TEXT4(0, trace, dbf_text);
return;
}
if ((cstat&~SCHN_STAT_PCI)||dstat) { cstat = irb->scsw.cstat;
QDIO_DBF_TEXT2(1, trace, "ick2"); dstat = irb->scsw.dstat;
sprintf(dbf_text,"%s", cdev->dev.bus_id);
QDIO_DBF_TEXT2(1,trace,dbf_text); switch (irq_ptr->state) {
QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int)); case QDIO_IRQ_STATE_INACTIVE:
QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); qdio_establish_handle_irq(cdev, cstat, dstat);
QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); break;
QDIO_PRINT_ERR("received check condition on activate " \
"queues on device %s (cs=x%x, ds=x%x).\n", case QDIO_IRQ_STATE_CLEANUP:
cdev->dev.bus_id, cstat, dstat); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
if (irq_ptr->no_input_qs) { break;
q=irq_ptr->input_qs[0];
} else if (irq_ptr->no_output_qs) { case QDIO_IRQ_STATE_ESTABLISHED:
q=irq_ptr->output_qs[0]; case QDIO_IRQ_STATE_ACTIVE:
} else { if (cstat & SCHN_STAT_PCI) {
QDIO_PRINT_ERR("oops... no queue registered for " \ qdio_handle_pci(irq_ptr);
"device %s!?\n", cdev->dev.bus_id); break;
goto omit_handler_call;
} }
q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
QDIO_STATUS_LOOK_FOR_ERROR, if ((cstat&~SCHN_STAT_PCI)||dstat) {
0,0,0,-1,-1,q->int_parm); qdio_handle_activate_check(cdev, intparm, cstat, dstat);
omit_handler_call: break;
qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED); }
return; default:
QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
"device %s?!\n",
irq_ptr->state, cdev->dev.bus_id);
} }
wake_up(&cdev->private->wait_q);
} }
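The state machine now reports failed or timed-out requests by passing an error pointer in place of the irb, so any handler installed in cdev->handler must test IS_ERR(irb) before touching status fields, exactly as qdio_handler does above. A stripped-down sketch of that convention for a hypothetical driver:

static void example_int_handler(struct ccw_device *cdev, unsigned long intparm,
				struct irb *irb)
{
	if (IS_ERR(irb)) {
		/* No irb available; the pointer encodes the reason. */
		if (PTR_ERR(irb) == -ETIMEDOUT)
			printk(KERN_WARNING "%s: request timed out\n",
			       cdev->dev.bus_id);
		else
			printk(KERN_WARNING "%s: i/o error %ld\n",
			       cdev->dev.bus_id, PTR_ERR(irb));
		return;
	}
	/* Normal completion: expect channel end and device end. */
	if (irb->scsw.cstat != 0 ||
	    irb->scsw.dstat != (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		printk(KERN_INFO "%s: unexpected status cs=%x ds=%x\n",
		       cdev->dev.bus_id, irb->scsw.cstat, irb->scsw.dstat);
}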
...@@ -1680,60 +1738,108 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags, ...@@ -1680,60 +1738,108 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
static unsigned char static unsigned char
qdio_check_siga_needs(int sch) qdio_check_siga_needs(int sch)
{ {
int resp_code,result; int result;
unsigned char qdioac;
struct {
struct chsc_header request;
u16 reserved1;
u16 first_sch;
u16 reserved2;
u16 last_sch;
u32 reserved3;
struct chsc_header response;
u32 reserved4;
u8 flags;
u8 reserved5;
u16 sch;
u8 qfmt;
u8 reserved6;
u8 qdioac;
u8 sch_class;
u8 reserved7;
u8 icnt;
u8 reserved8;
u8 ocnt;
} *ssqd_area;
ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!ssqd_area) {
QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
"SIGAs for sch x%x.\n", sch);
return -1; /* all flags set */
}
ssqd_area->request = (struct chsc_header) {
.length = 0x0010,
.code = 0x0024,
};
memset(chsc_area,0,sizeof(struct qdio_chsc_area)); ssqd_area->first_sch = sch;
chsc_area->request_block.command_code1=0x0010; /* length */ ssqd_area->last_sch = sch;
chsc_area->request_block.command_code2=0x0024; /* op code */
chsc_area->request_block.first_sch=sch;
chsc_area->request_block.last_sch=sch;
result=chsc(chsc_area); result=chsc(ssqd_area);
if (result) { if (result) {
QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \ QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
"SIGAs for sch x%x.\n", "SIGAs for sch x%x.\n",
result,sch); result,sch);
return -1; /* all flags set */ qdioac = -1; /* all flags set */
goto out;
} }
resp_code=chsc_area->request_block.operation_data_area. if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
store_qdio_data_response.response_code;
if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
QDIO_PRINT_WARN("response upon checking SIGA needs " \ QDIO_PRINT_WARN("response upon checking SIGA needs " \
"is 0x%x. Using all SIGAs for sch x%x.\n", "is 0x%x. Using all SIGAs for sch x%x.\n",
resp_code,sch); ssqd_area->response.code, sch);
return -1; /* all flags set */ qdioac = -1; /* all flags set */
goto out;
} }
if ( if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
(!(chsc_area->request_block.operation_data_area. !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
store_qdio_data_response.flags&CHSC_FLAG_QDIO_CAPABILITY)) || (ssqd_area->sch != sch)) {
(!(chsc_area->request_block.operation_data_area.
store_qdio_data_response.flags&CHSC_FLAG_VALIDITY)) ||
(chsc_area->request_block.operation_data_area.
store_qdio_data_response.sch!=sch)
) {
QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \ QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \
"using all SIGAs.\n",sch); "using all SIGAs.\n",sch);
return CHSC_FLAG_SIGA_INPUT_NECESSARY | qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
CHSC_FLAG_SIGA_OUTPUT_NECESSARY | CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */ CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
goto out;
} }
return chsc_area->request_block.operation_data_area. qdioac = ssqd_area->qdioac;
store_qdio_data_response.qdioac; out:
free_page ((unsigned long) ssqd_area);
return qdioac;
} }
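Every chsc request block is now built in its own zeroed page obtained per call and freed again when done, replacing the former shared static chsc_area; the command area has to be a contiguous, DMA-addressable page, hence get_zeroed_page(GFP_KERNEL | GFP_DMA). The recurring skeleton, with a purely illustrative request/response layout and command code:

static int example_chsc_skeleton(void)
{
	struct {
		struct chsc_header request;
		u32 request_data[1021];		/* command specific, made up */
		struct chsc_header response;
		u32 response_data;
	} *area;
	int ret;

	area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!area)
		return -ENOMEM;
	area->request = (struct chsc_header) {
		.length = 0x0010,	/* length of the request block */
		.code = 0x0010,		/* hypothetical command code */
	};
	ret = chsc(area);		/* condition code from the instruction */
	if (ret == 0 && area->response.code != QDIO_CHSC_RESPONSE_CODE_OK)
		ret = -EIO;
	free_page((unsigned long) area);
	return ret;
}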
/* the chsc_area is locked by the lock in qdio_activate */
static unsigned int static unsigned int
iqdio_check_chsc_availability(void) { iqdio_check_chsc_availability(void)
{
int result; int result;
int i;
memset(chsc_area,0,sizeof(struct qdio_chsc_area)); struct {
chsc_area->request_block.command_code1=0x0010; struct chsc_header request;
chsc_area->request_block.command_code2=0x0010; u32 reserved1;
result=chsc(chsc_area); u32 reserved2;
u32 reserved3;
struct chsc_header response;
u32 reserved4;
u32 general_char[510];
u32 chsc_char[518];
} *scsc_area;
scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scsc_area) {
QDIO_PRINT_WARN("Was not able to determine available" \
"CHSCs due to no memory.\n");
return -ENOMEM;
}
scsc_area->request = (struct chsc_header) {
.length = 0x0010,
.code = 0x0010,
};
result=chsc(scsc_area);
if (result) { if (result) {
QDIO_PRINT_WARN("Was not able to determine " \ QDIO_PRINT_WARN("Was not able to determine " \
"available CHSCs, cc=%i.\n", "available CHSCs, cc=%i.\n",
...@@ -1741,10 +1847,8 @@ iqdio_check_chsc_availability(void) { ...@@ -1741,10 +1847,8 @@ iqdio_check_chsc_availability(void) {
result=-EIO; result=-EIO;
goto exit; goto exit;
} }
result=0;
i=chsc_area->request_block.operation_data_area. if (scsc_area->response.code != 1) {
store_qdio_data_response.response_code;
if (i!=1) {
QDIO_PRINT_WARN("Was not able to determine " \ QDIO_PRINT_WARN("Was not able to determine " \
"available CHSCs.\n"); "available CHSCs.\n");
result=-EIO; result=-EIO;
...@@ -1753,24 +1857,24 @@ iqdio_check_chsc_availability(void) { ...@@ -1753,24 +1857,24 @@ iqdio_check_chsc_availability(void) {
/* 4: request block /* 4: request block
* 2: general char * 2: general char
* 512: chsc char */ * 512: chsc char */
if ( (*(((unsigned int*)(chsc_area))+4+2+1)&0x00800000)!=0x00800000) { if ((scsc_area->general_char[1] & 0x00800000) != 0x00800000) {
QDIO_PRINT_WARN("Adapter interruption facility not " \ QDIO_PRINT_WARN("Adapter interruption facility not " \
"installed.\n"); "installed.\n");
result=-ENOENT; result=-ENOENT;
goto exit; goto exit;
} }
if ( (*(((unsigned int*)(chsc_area))+4+512+3)&0x00180000)!= if ((scsc_area->chsc_char[2] & 0x00180000) != 0x00180000) {
0x00180000 ) {
QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \ QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
"not available.\n"); "not available.\n");
result=-ENOENT; result=-ENOENT;
goto exit; goto exit;
} }
exit: exit:
free_page ((unsigned long) scsc_area);
return result; return result;
} }
/* the chsc_area is locked by the lock in qdio_activate */
static unsigned int static unsigned int
iqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) iqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
{ {
...@@ -1782,7 +1886,27 @@ iqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) ...@@ -1782,7 +1886,27 @@ iqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
unsigned int resp_code; unsigned int resp_code;
int result; int result;
if (!irq_ptr->is_iqdio_irq) return -ENODEV; struct {
struct chsc_header request;
u16 operation_code;
u16 reserved1;
u32 reserved2;
u32 reserved3;
u64 summary_indicator_addr;
u64 subchannel_indicator_addr;
u32 ks:4;
u32 kc:4;
u32 reserved4:21;
u32 isc:3;
u32 reserved5[2];
u32 subsystem_id;
u32 reserved6[1004];
struct chsc_header response;
u32 reserved7;
} *scssc_area;
if (!irq_ptr->is_iqdio_irq)
return -ENODEV;
if (reset_to_zero) { if (reset_to_zero) {
real_addr_local_summary_bit=0; real_addr_local_summary_bit=0;
...@@ -1794,52 +1918,57 @@ iqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) ...@@ -1794,52 +1918,57 @@ iqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind); virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
} }
memset(chsc_area,0,sizeof(struct qdio_chsc_area)); scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
chsc_area->request_block.command_code1=0x0fe0; if (!scssc_area) {
chsc_area->request_block.command_code2=0x0021; QDIO_PRINT_WARN("No memory for setting indicators on " \
chsc_area->request_block.operation_code=0; "subchannel x%x.\n", irq_ptr->irq);
chsc_area->request_block.image_id=0; return -ENOMEM;
}
chsc_area->request_block.operation_data_area.set_chsc. scssc_area->request = (struct chsc_header) {
summary_indicator_addr=real_addr_local_summary_bit; .length = 0x0fe0,
chsc_area->request_block.operation_data_area.set_chsc. .code = 0x0021,
subchannel_indicator_addr=real_addr_dev_st_chg_ind; };
chsc_area->request_block.operation_data_area.set_chsc. scssc_area->operation_code = 0;
ks=QDIO_STORAGE_KEY;
chsc_area->request_block.operation_data_area.set_chsc. scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
kc=QDIO_STORAGE_KEY; scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
chsc_area->request_block.operation_data_area.set_chsc. scssc_area->ks = QDIO_STORAGE_KEY;
isc=IQDIO_THININT_ISC; scssc_area->kc = QDIO_STORAGE_KEY;
chsc_area->request_block.operation_data_area.set_chsc. scssc_area->isc = IQDIO_THININT_ISC;
subsystem_id=(1<<16)+irq_ptr->irq; scssc_area->subsystem_id = (1<<16) + irq_ptr->irq;
result=chsc(chsc_area); result = chsc(scssc_area);
if (result) { if (result) {
QDIO_PRINT_WARN("could not set indicators on irq x%x, " \ QDIO_PRINT_WARN("could not set indicators on irq x%x, " \
"cc=%i.\n",irq_ptr->irq,result); "cc=%i.\n",irq_ptr->irq,result);
return -EIO; result = -EIO;
goto out;
} }
resp_code=chsc_area->response_block.response_code; resp_code = scssc_area->response.code;
if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) { if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
QDIO_PRINT_WARN("response upon setting indicators " \ QDIO_PRINT_WARN("response upon setting indicators " \
"is 0x%x.\n",resp_code); "is 0x%x.\n",resp_code);
sprintf(dbf_text,"sidR%4x",resp_code); sprintf(dbf_text,"sidR%4x",resp_code);
QDIO_DBF_TEXT1(0,trace,dbf_text); QDIO_DBF_TEXT1(0,trace,dbf_text);
QDIO_DBF_TEXT1(0,setup,dbf_text); QDIO_DBF_TEXT1(0,setup,dbf_text);
ptr=&chsc_area->response_block; ptr=&scssc_area->response;
QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN); QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
return -EIO; result = -EIO;
goto out;
} }
QDIO_DBF_TEXT2(0,setup,"setscind"); QDIO_DBF_TEXT2(0,setup,"setscind");
QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit, QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
sizeof(unsigned long)); sizeof(unsigned long));
QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long)); QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
return 0; result = 0;
out:
free_page ((unsigned long) scssc_area);
return result;
} }
/* chsc_area would have to be locked if called from outside qdio_activate */
static unsigned int static unsigned int
iqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target) iqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
{ {
...@@ -1848,34 +1977,59 @@ iqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target) ...@@ -1848,34 +1977,59 @@ iqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
void *ptr; void *ptr;
char dbf_text[15]; char dbf_text[15];
if (!irq_ptr->is_iqdio_irq) return -ENODEV; struct {
struct chsc_header request;
u16 operation_code;
u16 reserved1;
u32 reserved2;
u32 reserved3;
u32 reserved4[2];
u32 delay_target;
u32 reserved5[1009];
struct chsc_header response;
u32 reserved6;
} *scsscf_area;
if (!irq_ptr->is_iqdio_irq)
return -ENODEV;
memset(chsc_area,0,sizeof(struct qdio_chsc_area)); scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
chsc_area->request_block.command_code1=0x0fe0; if (!scsscf_area) {
chsc_area->request_block.command_code2=0x1027; QDIO_PRINT_WARN("No memory for setting delay target on " \
chsc_area->request_block.operation_data_area.set_chsc_fast. "subchannel x%x.\n", irq_ptr->irq);
delay_target=delay_target<<16; return -ENOMEM;
}
scsscf_area->request = (struct chsc_header) {
.length = 0x0fe0,
.code = 0x1027,
};
result=chsc(chsc_area); scsscf_area->delay_target = delay_target<<16;
result=chsc(scsscf_area);
if (result) { if (result) {
QDIO_PRINT_WARN("could not set delay target on irq x%x, " \ QDIO_PRINT_WARN("could not set delay target on irq x%x, " \
"cc=%i. Continuing.\n",irq_ptr->irq,result); "cc=%i. Continuing.\n",irq_ptr->irq,result);
return -EIO; result = -EIO;
goto out;
} }
resp_code=chsc_area->response_block.response_code; resp_code = scsscf_area->response.code;
if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) { if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
QDIO_PRINT_WARN("response upon setting delay target " \ QDIO_PRINT_WARN("response upon setting delay target " \
"is 0x%x. Continuing.\n",resp_code); "is 0x%x. Continuing.\n",resp_code);
sprintf(dbf_text,"sdtR%4x",resp_code); sprintf(dbf_text,"sdtR%4x",resp_code);
QDIO_DBF_TEXT1(0,trace,dbf_text); QDIO_DBF_TEXT1(0,trace,dbf_text);
QDIO_DBF_TEXT1(0,setup,dbf_text); QDIO_DBF_TEXT1(0,setup,dbf_text);
ptr=&chsc_area->response_block; ptr=&scsscf_area->response;
QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN); QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
} }
QDIO_DBF_TEXT2(0,trace,"delytrgt"); QDIO_DBF_TEXT2(0,trace,"delytrgt");
QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long)); QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
return 0; result = 0; /* not critical */
out:
free_page ((unsigned long) scsscf_area);
return result;
} }
int int
...@@ -1972,19 +2126,22 @@ qdio_shutdown(struct ccw_device *cdev, int how) ...@@ -1972,19 +2126,22 @@ qdio_shutdown(struct ccw_device *cdev, int how)
ccw_device_halt(cdev, QDIO_DOING_CLEANUP); ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
timeout=QDIO_CLEANUP_HALT_TIMEOUT; timeout=QDIO_CLEANUP_HALT_TIMEOUT;
} }
cdev->private->state = DEV_STATE_QDIO_CLEANUP; qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
ccw_device_set_timeout(cdev, timeout); ccw_device_set_timeout(cdev, timeout);
spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); wait_event(cdev->private->wait_q,
irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
irq_ptr->state == QDIO_IRQ_STATE_ERR);
/* Ignore errors. */
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
out: out:
up(&irq_ptr->setting_up_sema); up(&irq_ptr->setting_up_sema);
return result; return result;
} }
static inline void static inline void
qdio_cleanup_finish(struct qdio_irq *irq_ptr) qdio_cleanup_finish(struct ccw_device *cdev, struct qdio_irq *irq_ptr)
{ {
if (irq_ptr->is_iqdio_irq) { if (irq_ptr->is_iqdio_irq) {
qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind); qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
...@@ -1992,6 +2149,10 @@ qdio_cleanup_finish(struct qdio_irq *irq_ptr) ...@@ -1992,6 +2149,10 @@ qdio_cleanup_finish(struct qdio_irq *irq_ptr)
/* reset adapter interrupt indicators */ /* reset adapter interrupt indicators */
} }
/* exchange int handlers, if necessary */
if ((void*)cdev->handler == (void*)qdio_handler)
cdev->handler=irq_ptr->original_int_handler;
qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE); qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
} }
...@@ -2014,51 +2175,16 @@ qdio_free(struct ccw_device *cdev) ...@@ -2014,51 +2175,16 @@ qdio_free(struct ccw_device *cdev)
if (cdev->private->state != DEV_STATE_ONLINE) if (cdev->private->state != DEV_STATE_ONLINE)
return -EINVAL; return -EINVAL;
qdio_cleanup_finish(irq_ptr); qdio_cleanup_finish(cdev, irq_ptr);
cdev->private->qdio_data = 0; cdev->private->qdio_data = 0;
up(&irq_ptr->setting_up_sema); up(&irq_ptr->setting_up_sema);
qdio_release_irq_memory(irq_ptr); qdio_release_irq_memory(irq_ptr);
module_put(THIS_MODULE);
return 0; return 0;
} }
static void
qdio_cleanup_handle_timeout(struct ccw_device *cdev)
{
unsigned long flags;
struct qdio_irq *irq_ptr;
irq_ptr = cdev->private->qdio_data;
spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n",
irq_ptr->irq);
spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
cdev->private->state = DEV_STATE_ONLINE;
wake_up(&cdev->private->wait_q);
}
static void
qdio_cleanup_handle_irq(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
struct qdio_irq *irq_ptr;
if (intparm == 0)
QDIO_PRINT_WARN("Got unsolicited interrupt on cleanup "
"(irq 0x%x).\n", cdev->private->irq);
irq_ptr = cdev->private->qdio_data;
qdio_irq_check_sense(irq_ptr->irq, irb);
cdev->private->state = DEV_STATE_ONLINE;
wake_up(&cdev->private->wait_q);
}
static inline void static inline void
qdio_allocate_do_dbf(struct qdio_initialize *init_data) qdio_allocate_do_dbf(struct qdio_initialize *init_data)
{ {
...@@ -2134,24 +2260,6 @@ qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, ...@@ -2134,24 +2260,6 @@ qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY; irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY;
} }
void
qdio_establish_handle_timeout(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr;
irq_ptr = cdev->private->qdio_data;
QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n",
irq_ptr->irq);
QDIO_DBF_TEXT2(1,setup,"eq:timeo");
/*
* FIXME:
* this is broken,
* we are in the context of a timer interrupt and
* qdio_shutdown calls schedule
*/
qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
}
static inline void static inline void
qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
...@@ -2208,7 +2316,7 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, ...@@ -2208,7 +2316,7 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
QDIO_PRINT_ERR("received check condition on establish " \ QDIO_PRINT_ERR("received check condition on establish " \
"queues on irq 0x%x (cs=x%x, ds=x%x).\n", "queues on irq 0x%x (cs=x%x, ds=x%x).\n",
irq_ptr->irq,cstat,dstat); irq_ptr->irq,cstat,dstat);
qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED); qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
} }
if (!(dstat & DEV_STAT_DEV_END)) { if (!(dstat & DEV_STAT_DEV_END)) {
...@@ -2218,13 +2326,7 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, ...@@ -2218,13 +2326,7 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
QDIO_PRINT_ERR("establish queues on irq %04x: didn't get " QDIO_PRINT_ERR("establish queues on irq %04x: didn't get "
"device end: dstat=%02x, cstat=%02x\n", "device end: dstat=%02x, cstat=%02x\n",
irq_ptr->irq, dstat, cstat); irq_ptr->irq, dstat, cstat);
/* qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
* FIXME:
* this is broken,
* we are probably in the context of an i/o interrupt and
* qdio_shutdown calls schedule
*/
qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
return 1; return 1;
} }
...@@ -2236,36 +2338,27 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, ...@@ -2236,36 +2338,27 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
"the following devstat: dstat=%02x, " "the following devstat: dstat=%02x, "
"cstat=%02x\n", "cstat=%02x\n",
irq_ptr->irq, dstat, cstat); irq_ptr->irq, dstat, cstat);
cdev->private->state = DEV_STATE_ONLINE; qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
return 1; return 1;
} }
return 0; return 0;
} }
static void static void
qdio_establish_handle_irq(struct ccw_device *cdev, unsigned long intparm, qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
struct irb *irb)
{ {
struct qdio_irq *irq_ptr; struct qdio_irq *irq_ptr;
int cstat, dstat;
char dbf_text[15]; char dbf_text[15];
cstat = irb->scsw.cstat; sprintf(dbf_text,"qehi%4x",cdev->private->irq);
dstat = irb->scsw.dstat; QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
irq_ptr = cdev->private->qdio_data;
if (intparm == 0) {
QDIO_PRINT_WARN("Got unsolicited interrupt on establish "
"queues (irq 0x%x).\n", cdev->private->irq);
return;
}
qdio_irq_check_sense(irq_ptr->irq, irb);
if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat))
return; return;
irq_ptr = cdev->private->qdio_data;
if (MACHINE_IS_VM) if (MACHINE_IS_VM)
irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq); irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq);
else else
...@@ -2287,6 +2380,7 @@ qdio_establish_handle_irq(struct ccw_device *cdev, unsigned long intparm, ...@@ -2287,6 +2380,7 @@ qdio_establish_handle_irq(struct ccw_device *cdev, unsigned long intparm,
qdio_initialize_set_siga_flags_output(irq_ptr); qdio_initialize_set_siga_flags_output(irq_ptr);
qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
} }
int int
...@@ -2334,7 +2428,7 @@ qdio_allocate(struct qdio_initialize *init_data) ...@@ -2334,7 +2428,7 @@ qdio_allocate(struct qdio_initialize *init_data)
qdio_allocate_do_dbf(init_data); qdio_allocate_do_dbf(init_data);
/* create irq */ /* create irq */
irq_ptr=kmalloc(sizeof(struct qdio_irq),GFP_DMA); irq_ptr=kmalloc(sizeof(struct qdio_irq), GFP_KERNEL | GFP_DMA);
QDIO_DBF_TEXT0(0,setup,"irq_ptr:"); QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*)); QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
...@@ -2347,7 +2441,7 @@ qdio_allocate(struct qdio_initialize *init_data) ...@@ -2347,7 +2441,7 @@ qdio_allocate(struct qdio_initialize *init_data)
memset(irq_ptr,0,sizeof(struct qdio_irq)); memset(irq_ptr,0,sizeof(struct qdio_irq));
/* wipes qib.ac, required by ar7063 */ /* wipes qib.ac, required by ar7063 */
irq_ptr->qdr=kmalloc(sizeof(struct qdr),GFP_DMA); irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
if (!(irq_ptr->qdr)) { if (!(irq_ptr->qdr)) {
kfree(irq_ptr->qdr); kfree(irq_ptr->qdr);
kfree(irq_ptr); kfree(irq_ptr);
...@@ -2372,11 +2466,6 @@ qdio_allocate(struct qdio_initialize *init_data) ...@@ -2372,11 +2466,6 @@ qdio_allocate(struct qdio_initialize *init_data)
if (!irq_ptr->dev_st_chg_ind) { if (!irq_ptr->dev_st_chg_ind) {
QDIO_PRINT_WARN("no indicator location available " \ QDIO_PRINT_WARN("no indicator location available " \
"for irq 0x%x\n",irq_ptr->irq); "for irq 0x%x\n",irq_ptr->irq);
/*
* FIXME:
* qdio_release_irq_memory does MOD_DEC_USE_COUNT
* in an unbalanced fashion (see 30 lines farther down)
*/
qdio_release_irq_memory(irq_ptr); qdio_release_irq_memory(irq_ptr);
return -ENOBUFS; return -ENOBUFS;
} }
...@@ -2396,19 +2485,17 @@ qdio_allocate(struct qdio_initialize *init_data) ...@@ -2396,19 +2485,17 @@ qdio_allocate(struct qdio_initialize *init_data)
init_data->q_format,init_data->flags, init_data->q_format,init_data->flags,
init_data->input_sbal_addr_array, init_data->input_sbal_addr_array,
init_data->output_sbal_addr_array)) { init_data->output_sbal_addr_array)) {
/*
* FIXME:
* qdio_release_irq_memory does MOD_DEC_USE_COUNT
* in an unbalanced fashion (see 10 lines farther down)
*/
qdio_release_irq_memory(irq_ptr); qdio_release_irq_memory(irq_ptr);
return -ENOMEM; return -ENOMEM;
} }
qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE); qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
MOD_INC_USE_COUNT; if (!try_module_get(THIS_MODULE)) {
QDIO_DBF_TEXT3(0,setup,"MOD_INC_"); QDIO_PRINT_CRIT("try_module_get() failed!\n");
qdio_release_irq_memory(irq_ptr);
return -EINVAL;
}
init_MUTEX_LOCKED(&irq_ptr->setting_up_sema); init_MUTEX_LOCKED(&irq_ptr->setting_up_sema);
...@@ -2474,6 +2561,10 @@ qdio_allocate(struct qdio_initialize *init_data) ...@@ -2474,6 +2561,10 @@ qdio_allocate(struct qdio_initialize *init_data)
} else } else
irq_ptr->aqueue = *ciw; irq_ptr->aqueue = *ciw;
/* Set new interrupt handler. */
irq_ptr->original_int_handler = init_data->cdev->handler;
init_data->cdev->handler = qdio_handler;
/* the iqdio CHSC stuff */ /* the iqdio CHSC stuff */
if (irq_ptr->is_iqdio_irq) { if (irq_ptr->is_iqdio_irq) {
/* iqdio_enable_adapter_int_facility(irq_ptr);*/ /* iqdio_enable_adapter_int_facility(irq_ptr);*/
...@@ -2485,25 +2576,12 @@ qdio_allocate(struct qdio_initialize *init_data) ...@@ -2485,25 +2576,12 @@ qdio_allocate(struct qdio_initialize *init_data)
result=iqdio_set_subchannel_ind(irq_ptr,0); result=iqdio_set_subchannel_ind(irq_ptr,0);
if (result) { if (result) {
up(&irq_ptr->setting_up_sema); up(&irq_ptr->setting_up_sema);
/*
* FIXME:
* need some callback pointers to be set already,
* i.e. irq_ptr->cleanup_irq and irq_ptr->cleanup_timeout?
* (see 10 lines farther down)
*/
qdio_cleanup(init_data->cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); qdio_cleanup(init_data->cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
return result; return result;
} }
iqdio_set_delay_target(irq_ptr,IQDIO_DELAY_TARGET); iqdio_set_delay_target(irq_ptr,IQDIO_DELAY_TARGET);
} }
/* Set callback functions. */
irq_ptr->cleanup_irq = qdio_cleanup_handle_irq;
irq_ptr->cleanup_timeout = qdio_cleanup_handle_timeout;
irq_ptr->establish_irq = qdio_establish_handle_irq;
irq_ptr->establish_timeout = qdio_establish_handle_timeout;
irq_ptr->handler = qdio_handler;
up(&irq_ptr->setting_up_sema); up(&irq_ptr->setting_up_sema);
return 0; return 0;
...@@ -2538,13 +2616,14 @@ qdio_establish(struct ccw_device *cdev) ...@@ -2538,13 +2616,14 @@ qdio_establish(struct ccw_device *cdev)
spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
ccw_device_set_timeout(cdev, QDIO_ESTABLISH_TIMEOUT);
ccw_device_set_options(cdev, 0); ccw_device_set_options(cdev, 0);
result=ccw_device_start(cdev,&irq_ptr->ccw, result=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
QDIO_DOING_ESTABLISH,0,0); QDIO_DOING_ESTABLISH,0,0,
QDIO_ESTABLISH_TIMEOUT);
if (result) { if (result) {
result2=ccw_device_start(cdev,&irq_ptr->ccw, result2=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
QDIO_DOING_ESTABLISH,0,0); QDIO_DOING_ESTABLISH,0,0,
QDIO_ESTABLISH_TIMEOUT);
sprintf(dbf_text,"eq:io%4x",result); sprintf(dbf_text,"eq:io%4x",result);
QDIO_DBF_TEXT2(1,setup,dbf_text); QDIO_DBF_TEXT2(1,setup,dbf_text);
if (result2) { if (result2) {
...@@ -2556,8 +2635,7 @@ qdio_establish(struct ccw_device *cdev) ...@@ -2556,8 +2635,7 @@ qdio_establish(struct ccw_device *cdev)
irq_ptr->irq,result,result2); irq_ptr->irq,result,result2);
result=result2; result=result2;
} }
if (result == 0)
cdev->private->state = DEV_STATE_QDIO_INIT;
spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
if (result) { if (result) {
...@@ -2567,13 +2645,15 @@ qdio_establish(struct ccw_device *cdev) ...@@ -2567,13 +2645,15 @@ qdio_establish(struct ccw_device *cdev)
} }
wait_event(cdev->private->wait_q, wait_event(cdev->private->wait_q,
dev_fsm_final_state(cdev) || irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
(irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)); irq_ptr->state == QDIO_IRQ_STATE_ERR);
if (cdev->private->state == DEV_STATE_QDIO_INIT) if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
result = 0; result = 0;
else else {
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
result = -EIO; result = -EIO;
}
up(&irq_ptr->setting_up_sema); up(&irq_ptr->setting_up_sema);
...@@ -2593,7 +2673,7 @@ qdio_activate(struct ccw_device *cdev, int flags) ...@@ -2593,7 +2673,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
if (!irq_ptr) if (!irq_ptr)
return -ENODEV; return -ENODEV;
if (cdev->private->state != DEV_STATE_QDIO_INIT) if (cdev->private->state != DEV_STATE_ONLINE)
return -EINVAL; return -EINVAL;
down(&irq_ptr->setting_up_sema); down(&irq_ptr->setting_up_sema);
...@@ -2637,8 +2717,6 @@ qdio_activate(struct ccw_device *cdev, int flags) ...@@ -2637,8 +2717,6 @@ qdio_activate(struct ccw_device *cdev, int flags)
if (result) if (result)
goto out; goto out;
cdev->private->state = DEV_STATE_QDIO_ACTIVE;
for (i=0;i<irq_ptr->no_input_qs;i++) { for (i=0;i<irq_ptr->no_input_qs;i++) {
if (irq_ptr->is_iqdio_irq) { if (irq_ptr->is_iqdio_irq) {
/* /*
...@@ -2659,9 +2737,9 @@ qdio_activate(struct ccw_device *cdev, int flags) ...@@ -2659,9 +2737,9 @@ qdio_activate(struct ccw_device *cdev, int flags)
} }
} }
qdio_wait_nonbusy(QDIO_ACTIVATE_DELAY); qdio_wait_nonbusy(QDIO_ACTIVATE_TIMEOUT);
qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ACTIVE); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
out: out:
up(&irq_ptr->setting_up_sema); up(&irq_ptr->setting_up_sema);
...@@ -2807,12 +2885,10 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags, ...@@ -2807,12 +2885,10 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
{ {
struct qdio_irq *irq_ptr; struct qdio_irq *irq_ptr;
#ifdef QDIO_DBF_LIKE_HELL
char dbf_text[20]; char dbf_text[20];
sprintf(dbf_text,"doQD%04x",irq); sprintf(dbf_text,"doQD%04x",cdev->private->irq);
QDIO_DBF_TEXT3(0,trace,dbf_text); QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* QDIO_DBF_LIKE_HELL */
if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) || if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) ||
(count>QDIO_MAX_BUFFERS_PER_Q) || (count>QDIO_MAX_BUFFERS_PER_Q) ||
...@@ -2826,7 +2902,6 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags, ...@@ -2826,7 +2902,6 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
if (!irq_ptr) if (!irq_ptr)
return -ENODEV; return -ENODEV;
#ifdef QDIO_DBF_LIKE_HELL
if (callflags&QDIO_FLAG_SYNC_INPUT) if (callflags&QDIO_FLAG_SYNC_INPUT)
QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number], QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
sizeof(void*)); sizeof(void*));
...@@ -2837,7 +2912,6 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags, ...@@ -2837,7 +2912,6 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
QDIO_DBF_TEXT3(0,trace,dbf_text); QDIO_DBF_TEXT3(0,trace,dbf_text);
sprintf(dbf_text,"qi%02xct%02x",qidx,count); sprintf(dbf_text,"qi%02xct%02x",qidx,count);
QDIO_DBF_TEXT3(0,trace,dbf_text); QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* QDIO_DBF_LIKE_HELL */
if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE) if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
return -EBUSY; return -EBUSY;
...@@ -2989,25 +3063,12 @@ qdio_get_qdio_memory(void) ...@@ -2989,25 +3063,12 @@ qdio_get_qdio_memory(void)
GFP_KERNEL); GFP_KERNEL);
if (!indicators) return -ENOMEM; if (!indicators) return -ENOMEM;
memset(indicators,0,sizeof(__u32)*(INDICATORS_PER_CACHELINE)); memset(indicators,0,sizeof(__u32)*(INDICATORS_PER_CACHELINE));
chsc_area=(struct qdio_chsc_area *)
kmalloc(sizeof(struct qdio_chsc_area),GFP_KERNEL);
QDIO_DBF_TEXT3(0,trace,"chscarea"); \
QDIO_DBF_HEX3(0,trace,&chsc_area,sizeof(void*)); \
if (!chsc_area) {
QDIO_PRINT_ERR("not enough memory for chsc area. Cannot " \
"initialize QDIO.\n");
kfree(indicators);
return -ENOMEM;
}
memset(chsc_area,0,sizeof(struct qdio_chsc_area));
return 0; return 0;
} }
static void static void
qdio_release_qdio_memory(void) qdio_release_qdio_memory(void)
{ {
kfree(chsc_area);
if (indicators) if (indicators)
kfree(indicators); kfree(indicators);
} }
......
#ifndef _CIO_QDIO_H #ifndef _CIO_QDIO_H
#define _CIO_QDIO_H #define _CIO_QDIO_H
#define VERSION_CIO_QDIO_H "$Revision: 1.11 $" #define VERSION_CIO_QDIO_H "$Revision: 1.16 $"
//#define QDIO_DBF_LIKE_HELL //#define QDIO_DBF_LIKE_HELL
...@@ -48,25 +48,25 @@ ...@@ -48,25 +48,25 @@
#define QDIO_STATS_CLASSES 2 #define QDIO_STATS_CLASSES 2
#define QDIO_STATS_COUNT_NEEDED 2*/ #define QDIO_STATS_COUNT_NEEDED 2*/
#define QDIO_ACTIVATE_DELAY 5 /* according to brenton belmar and paul
gioquindo it can take up to 5ms before
queues are really active */
#define QDIO_NO_USE_COUNT_TIME 10 #define QDIO_NO_USE_COUNT_TIME 10
#define QDIO_NO_USE_COUNT_TIMEOUT 1000 /* wait for 1 sec on each q before #define QDIO_NO_USE_COUNT_TIMEOUT 1000 /* wait for 1 sec on each q before
exiting without having use_count exiting without having use_count
of the queue to 0 */ of the queue to 0 */
#define QDIO_ESTABLISH_TIMEOUT 1000 #define QDIO_ESTABLISH_TIMEOUT 1000
#define QDIO_ACTIVATE_TIMEOUT 100 #define QDIO_ACTIVATE_TIMEOUT 5
#define QDIO_CLEANUP_CLEAR_TIMEOUT 20000 #define QDIO_CLEANUP_CLEAR_TIMEOUT 20000
#define QDIO_CLEANUP_HALT_TIMEOUT 10000 #define QDIO_CLEANUP_HALT_TIMEOUT 10000
#define QDIO_IRQ_STATE_FRESH 0 /* must be 0 -> memset has set it to 0 */ enum qdio_irq_states {
#define QDIO_IRQ_STATE_INACTIVE 1 QDIO_IRQ_STATE_INACTIVE,
#define QDIO_IRQ_STATE_ESTABLISHED 2 QDIO_IRQ_STATE_ESTABLISHED,
#define QDIO_IRQ_STATE_ACTIVE 3 QDIO_IRQ_STATE_ACTIVE,
#define QDIO_IRQ_STATE_STOPPED 4 QDIO_IRQ_STATE_STOPPED,
QDIO_IRQ_STATE_CLEANUP,
QDIO_IRQ_STATE_ERR,
NR_QDIO_IRQ_STATES,
};
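The #defines become an enum, and the added CLEANUP and ERR states let establish, activate and shutdown synchronize on this qdio-private state instead of the DEV_STATE_QDIO_* states dropped from the cio state machine. A simplified sketch of the transitions and of the wait pattern used in qdio.c (the helper is illustrative, not part of the interface):

/*
 * INACTIVE -establish-> ESTABLISHED -activate-> ACTIVE
 * ACTIVE -shutdown-> CLEANUP -final interrupt-> INACTIVE
 * check conditions and timeouts end up in STOPPED or ERR
 */
static int example_wait_until_established(struct ccw_device *cdev,
					  struct qdio_irq *irq_ptr)
{
	wait_event(cdev->private->wait_q,
		   irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		   irq_ptr->state == QDIO_IRQ_STATE_ERR);
	return (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED) ? 0 : -EIO;
}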
/* used as intparm in do_IO: */ /* used as intparm in do_IO: */
#define QDIO_DOING_SENSEID 0 #define QDIO_DOING_SENSEID 0
...@@ -443,81 +443,6 @@ do_clear_global_summary(void) ...@@ -443,81 +443,6 @@ do_clear_global_summary(void)
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 #define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 #define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
struct qdio_chsc_area {
struct {
/* word 0 */
__u16 command_code1;
__u16 command_code2;
/* word 1 */
__u16 operation_code;
__u16 first_sch;
/* word 2 */
__u8 reserved1;
__u8 image_id;
__u16 last_sch;
/* word 3 */
__u32 reserved2;
/* word 4 */
union {
struct {
/* word 4&5 */
__u64 summary_indicator_addr;
/* word 6&7 */
__u64 subchannel_indicator_addr;
/* word 8 */
int ks:4;
int kc:4;
int reserved1:21;
int isc:3;
/* word 9&10 */
__u32 reserved2[2];
/* word 11 */
__u32 subsystem_id;
/* word 12-1015 */
__u32 reserved3[1004];
} __attribute__ ((packed,aligned(4))) set_chsc;
struct {
/* word 4&5 */
__u32 reserved1[2];
/* word 6 */
__u32 delay_target;
/* word 7-1015 */
__u32 reserved4[1009];
} __attribute__ ((packed,aligned(4))) set_chsc_fast;
struct {
/* word 0 */
__u16 length;
__u16 response_code;
/* word 1 */
__u32 reserved1;
/* words 2 to 9 for st sch qdio data */
__u8 flags;
__u8 reserved2;
__u16 sch;
__u8 qfmt;
__u8 reserved3;
__u8 qdioac;
__u8 sch_class;
__u8 reserved4;
__u8 icnt;
__u8 reserved5;
__u8 ocnt;
/* plus 5 words of reserved fields */
} __attribute__ ((packed,aligned(8)))
store_qdio_data_response;
} operation_data_area;
} __attribute__ ((packed,aligned(8))) request_block;
struct {
/* word 0 */
__u16 length;
__u16 response_code;
/* word 1 */
__u32 reserved1;
} __attribute__ ((packed,aligned(8))) response_block;
} __attribute__ ((packed,aligned(PAGE_SIZE)));
#ifdef QDIO_PERFORMANCE_STATS #ifdef QDIO_PERFORMANCE_STATS
struct qdio_perf_stats { struct qdio_perf_stats {
unsigned int tl_runs; unsigned int tl_runs;
...@@ -623,7 +548,7 @@ struct qdio_q { ...@@ -623,7 +548,7 @@ struct qdio_q {
struct tasklet_struct tasklet; struct tasklet_struct tasklet;
#endif /* QDIO_USE_TIMERS_FOR_POLLING */ #endif /* QDIO_USE_TIMERS_FOR_POLLING */
unsigned int state; enum qdio_irq_states state;
/* used to store the error condition during a data transfer */ /* used to store the error condition during a data transfer */
unsigned int qdio_error; unsigned int qdio_error;
...@@ -674,7 +599,7 @@ struct qdio_irq { ...@@ -674,7 +599,7 @@ struct qdio_irq {
unsigned int hydra_gives_outbound_pcis; unsigned int hydra_gives_outbound_pcis;
unsigned int sync_done_on_outb_pcis; unsigned int sync_done_on_outb_pcis;
unsigned int state; enum qdio_irq_states state;
struct semaphore setting_up_sema; struct semaphore setting_up_sema;
unsigned int no_input_qs; unsigned int no_input_qs;
...@@ -694,13 +619,8 @@ struct qdio_irq { ...@@ -694,13 +619,8 @@ struct qdio_irq {
struct qib qib; struct qib qib;
/* Functions called via the generic cio layer */ void (*original_int_handler) (struct ccw_device *,
void (*cleanup_irq) (struct ccw_device *, unsigned long, struct irb *); unsigned long, struct irb *);
void (*cleanup_timeout) (struct ccw_device *);
void (*establish_irq) (struct ccw_device *, unsigned long,
struct irb *);
void (*establish_timeout) (struct ccw_device *);
void (*handler) (struct ccw_device *, unsigned long, struct irb *);
}; };
#endif #endif
...@@ -44,7 +44,11 @@ init_IRQ(void) ...@@ -44,7 +44,11 @@ init_IRQ(void)
/* /*
* Let's build our path group ID here. * Let's build our path group ID here.
*/ */
global_pgid.cpu_addr = *(__u16 *) __LC_CPUADDR; #ifdef CONFIG_SMP
global_pgid.cpu_addr = hard_smp_processor_id();
#else
global_pgid.cpu_addr = 0;
#endif
global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
global_pgid.tod_high = (__u32) (get_clock() >> 32); global_pgid.tod_high = (__u32) (get_clock() >> 32);
......
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
extern void css_process_crw(int); extern void css_process_crw(int);
extern void chsc_process_crw(void); extern void chsc_process_crw(void);
extern void chp_process_crw(int); extern void chp_process_crw(int, int);
static void static void
s390_handle_damage(char *msg) s390_handle_damage(char *msg)
...@@ -62,7 +62,17 @@ s390_collect_crw_info(void) ...@@ -62,7 +62,17 @@ s390_collect_crw_info(void)
break; break;
case CRW_RSC_CPATH: case CRW_RSC_CPATH:
pr_debug("source is channel path %02X\n", crw.rsid); pr_debug("source is channel path %02X\n", crw.rsid);
chp_process_crw(crw.rsid); switch (crw.erc) {
case CRW_ERC_IPARM: /* Path has come. */
chp_process_crw(crw.rsid, 1);
break;
case CRW_ERC_PERRI: /* Path has gone. */
chp_process_crw(crw.rsid, 0);
break;
default:
pr_debug("Don't know how to handle erc=%x\n",
crw.erc);
}
break; break;
case CRW_RSC_CONFIG: case CRW_RSC_CONFIG:
pr_debug("source is configuration-alert facility\n"); pr_debug("source is configuration-alert facility\n");
......