Commit 01613ea0 authored by Martin Schwidefsky, committed by Linus Torvalds

[PATCH] s390: common i/o layer.

updates for the channel subsystem and qdio driver

This adds the missing support for chp machine checks, i.e.
enabling or disabling a set of devices from the service element.
Some minor bugs in the driver are fixed as well.
parent cbc67add
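In outline, the new channel-path machine check handling refreshes the chsc subchannel descriptions and then decides whether the reported chpid has disappeared or become available. A condensed sketch of that dispatch, using only names from the chsc.c hunks below (locking and the cio_chsc_desc_avail error path of the real chp_process_crw are omitted):

/*
 * Condensed sketch of the chp machine check dispatch added in chsc.c.
 * chpids, chps[], new_channel_path(), set_chp_status(),
 * s390_set_chpid_offline() and chp_add() are the names from the patch.
 */
static void
chp_crw_outline(int chpid)
{
	chsc_get_sch_descriptions();	/* refresh which chpids exist */
	if (!test_bit(chpid, chpids)) {
		/* Path has gone; handle it like a link incident. */
		s390_set_chpid_offline(chpid);
		return;
	}
	/* Path has come; make sure a channel_path struct exists... */
	if (chps[chpid] == NULL)
		new_channel_path(chpid, CHP_ONLINE);
	else
		set_chp_status(chpid, CHP_ONLINE);
	/* ...and re-enable devices that can use this chpid. */
	chp_add(chpid);
}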
/*
* drivers/s390/cio/ccwgroup.c
* bus driver for ccwgroup
* $Revision: 1.5 $
* $Revision: 1.6 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
......
/*
* drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
* $Revision: 1.46 $
* $Revision: 1.57 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
......@@ -53,12 +53,6 @@ chsc_chpid_logical (struct subchannel *sch, int chp)
return test_bit (sch->schib.pmcw.chpid[chp], chpids_logical);
}
static inline void
chsc_clear_chpid(struct subchannel *sch, int chp)
{
clear_bit(sch->schib.pmcw.chpid[chp], chpids);
}
void
chsc_validate_chpids(struct subchannel *sch)
{
......@@ -69,17 +63,10 @@ chsc_validate_chpids(struct subchannel *sch)
for (chp = 0; chp <= 7; chp++) {
mask = 0x80 >> chp;
if (sch->lpm & mask) {
if (sch->lpm & mask)
if (!chsc_chpid_logical(sch, chp))
/* disable using this path */
sch->lpm &= ~mask;
} else {
/* This chpid is not
* available to us */
chsc_clear_chpid(sch, chp);
if (test_bit(chp, chpids_known))
set_chp_status(chp, CHP_STANDBY);
}
}
}
......@@ -278,6 +265,7 @@ s390_set_chpid_offline( __u8 chpid)
sch = ioinfo[irq];
if (sch == NULL)
continue; /* we don't know the device anyway */
/* FIXME: Kill pending I/O. */
s390_subchannel_remove_chpid(sch, chpid);
}
#endif
......@@ -381,6 +369,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
continue;
}
/* FIXME: Kill pending I/O. */
spin_lock_irq(&sch->lock);
chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
......@@ -413,7 +402,7 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
static void
do_process_crw(void *ignore)
{
int ccode;
int do_sei;
/*
* build the chsc request block for store event information
......@@ -438,39 +427,57 @@ do_process_crw(void *ignore)
CIO_TRACE_EVENT( 2, "prcss");
do_sei = 1;
while (do_sei) {
int ccode;
ccode = chsc(&chsc_area_sei);
if (ccode > 0)
return;
switch (chsc_area_sei.response_block.response_code) {
switch (sei_res->response_code) {
/* for debug purposes, check for problems */
case 0x0001:
break; /* everything ok */
case 0x0002:
CIO_CRW_EVENT(2, "chsc_process_crw:invalid command!\n");
CIO_CRW_EVENT(2,
"chsc_process_crw: invalid command!\n");
return;
case 0x0003:
CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
"request block!\n");
return;
case 0x0005:
CIO_CRW_EVENT(2, "chsc_process_crw: no event information "
"stored\n");
CIO_CRW_EVENT(2, "chsc_process_crw: no event "
"information stored\n");
return;
default:
CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
chsc_area_sei.response_block.response_code);
sei_res->response_code);
return;
}
CIO_CRW_EVENT(4, "chsc_process_crw: event information successfully "
"stored\n");
CIO_CRW_EVENT(4, "chsc_process_crw: event information "
"successfully stored\n");
/* Check if there is more event information pending. */
if (sei_res->flags & 0x80)
CIO_CRW_EVENT( 2, "chsc_process_crw: "
"further event information pending\n");
else
do_sei = 0;
/* Check if we might have lost some information. */
if (sei_res->flags & 0x40)
CIO_CRW_EVENT( 2, "chsc_process_crw: Event information "
"has been lost due to overflow!\n");
if (sei_res->rs != 4) {
CIO_CRW_EVENT(2, "chsc_process_crw: "
"reporting source (%04X) isn't a chpid!"
"Aborting processing of machine check...\n",
CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
"(%04X) isn't a chpid!\n",
sei_res->rsid);
return;
continue;
}
/* which kind of information was stored? */
......@@ -487,8 +494,8 @@ do_process_crw(void *ignore)
CIO_CRW_EVENT(4, "chsc_process_crw: "
"channel subsystem reports some I/O "
"devices may have become accessible\n");
pr_debug( KERN_DEBUG "Data received after sei: \n");
pr_debug( KERN_DEBUG "Validity flags: %x\n", sei_res->vf);
pr_debug("Data received after sei: \n");
pr_debug("Validity flags: %x\n", sei_res->vf);
/* allocate a new channel path structure, if needed */
if (chps[sei_res->rsid] == NULL)
......@@ -497,27 +504,34 @@ do_process_crw(void *ignore)
set_chp_status(sei_res->rsid, CHP_ONLINE);
if ((sei_res->vf & 0x80) == 0) {
pr_debug( KERN_DEBUG "chpid: %x\n", sei_res->rsid);
pr_debug("chpid: %x\n", sei_res->rsid);
s390_process_res_acc(sei_res->rsid, 0, 0);
} else if ((sei_res->vf & 0xc0) == 0x80) {
pr_debug( KERN_DEBUG "chpid: %x link addr: %x\n",
pr_debug("chpid: %x link addr: %x\n",
sei_res->rsid, sei_res->fla);
s390_process_res_acc(sei_res->rsid, sei_res->fla,
0xff00);
s390_process_res_acc(sei_res->rsid,
sei_res->fla, 0xff00);
} else if ((sei_res->vf & 0xc0) == 0xc0) {
pr_debug( KERN_DEBUG "chpid: %x full link addr: %x\n",
pr_debug("chpid: %x full link addr: %x\n",
sei_res->rsid, sei_res->fla);
s390_process_res_acc(sei_res->rsid, sei_res->fla,
0xffff);
s390_process_res_acc(sei_res->rsid,
sei_res->fla, 0xffff);
}
pr_debug( KERN_DEBUG "\n");
pr_debug("\n");
break;
default: /* other stuff */
CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n", sei_res->cc);
CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
sei_res->cc);
break;
}
if (do_sei) {
memset(&chsc_area_sei, 0, sizeof(struct sei_area));
chsc_area_sei.request_block.command_code1 = 0x0010;
chsc_area_sei.request_block.command_code2 = 0x000e;
}
}
}
void
......@@ -528,6 +542,99 @@ chsc_process_crw(void)
schedule_work(&work);
}
static void
chp_add(int chpid)
{
struct subchannel *sch;
int irq, ret;
char dbf_txt[15];
if (!test_bit(chpid, chpids_logical))
return; /* no need to do the rest */
sprintf(dbf_txt, "cadd%x", chpid);
CIO_TRACE_EVENT(2, dbf_txt);
for (irq = 0; irq <= __MAX_SUBCHANNELS; irq++) {
int i;
sch = ioinfo[irq];
if (!sch) {
ret = css_probe_device(irq);
if (ret == -ENXIO)
/* We're through */
return;
continue;
}
/* FIXME: Kill pending I/O. */
spin_lock(&sch->lock);
for (i=0; i<8; i++)
if (sch->schib.pmcw.chpid[i] == chpid) {
if (stsch(sch->irq, &sch->schib) != 0) {
/* Endgame. */
spin_unlock(&sch->lock);
return;
}
break;
}
if (i==8) {
spin_unlock(&sch->lock);
return;
}
sch->lpm = (sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom)
| 0x80 >> i;
chsc_validate_chpids(sch);
dev_fsm_event(sch->dev.driver_data, DEV_EVENT_VERIFY);
spin_unlock(&sch->lock);
}
}
/*
* Handling of crw machine checks with channel path source.
*/
void
chp_process_crw(int chpid)
{
/*
* Update our descriptions. We need this since we don't always
* get machine checks for path come and can't rely on our information
* being consistent otherwise.
*/
chsc_get_sch_descriptions();
if (!cio_chsc_desc_avail) {
/*
* Something went wrong...
* We can't reliably say whether a path was there before.
*/
CIO_CRW_EVENT(0, "Error: Could not retrieve "
"subchannel descriptions, will not process chp"
"machine check...\n");
return;
}
if (!test_bit(chpid, chpids)) {
/* Path has gone. We use the link incident routine.*/
s390_set_chpid_offline(chpid);
} else {
/*
* Path has come. Allocate a new channel path structure,
* if needed.
*/
if (chps[chpid] == NULL)
new_channel_path(chpid, CHP_ONLINE);
else
set_chp_status(chpid, CHP_ONLINE);
/* Avoid the extra overhead in process_rec_acc. */
chp_add(chpid);
}
}
/*
* Function: s390_vary_chpid
* Varies the specified chpid online or offline
......@@ -608,8 +715,7 @@ s390_vary_chpid( __u8 chpid, int on)
static ssize_t
chp_status_show(struct device *dev, char *buf)
{
struct sys_device *sdev = container_of(dev, struct sys_device, dev);
struct channel_path *chp = container_of(sdev, struct channel_path, sdev);
struct channel_path *chp = container_of(dev, struct channel_path, dev);
if (!chp)
return 0;
......@@ -631,8 +737,7 @@ chp_status_show(struct device *dev, char *buf)
static ssize_t
chp_status_write(struct device *dev, const char *buf, size_t count)
{
struct sys_device *sdev = container_of(dev, struct sys_device, dev);
struct channel_path *cp = container_of(sdev, struct channel_path, sdev);
struct channel_path *cp = container_of(dev, struct channel_path, dev);
char cmd[10];
int num_args;
int error;
......@@ -667,29 +772,29 @@ new_channel_path(int chpid, int status)
chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL);
if (!chp)
return -ENOMEM;
memset(chp, 0, sizeof(struct channel_path));
chps[chpid] = chp;
/* fill in status, etc. */
chp->id = chpid;
chp->state = status;
chp->dev.parent = &css_bus_device;
snprintf(chp->sdev.dev.name, DEVICE_NAME_SIZE,
snprintf(chp->dev.name, DEVICE_NAME_SIZE,
"channel path %x", chpid);
chp->sdev.name = "channel_path";
chp->sdev.id = chpid;
snprintf(chp->dev.bus_id, DEVICE_ID_SIZE, "chp%x", chpid);
/* make it known to the system */
ret = sys_device_register(&chp->sdev);
ret = device_register(&chp->dev);
if (ret) {
printk(KERN_WARNING "%s: could not register %02x\n",
__func__, chpid);
return ret;
}
ret = device_create_file(&chp->sdev.dev, &dev_attr_status);
ret = device_create_file(&chp->dev, &dev_attr_status);
if (ret)
sys_device_unregister(&chp->sdev);
device_unregister(&chp->dev);
return ret;
}
......
......@@ -95,7 +95,7 @@ struct ssd_area {
struct channel_path {
int id;
int state;
struct sys_device sdev;
struct device dev;
};
extern struct channel_path *chps[];
......
/*
* drivers/s390/cio/cio.c
* S/390 common I/O routines -- low level i/o calls
* $Revision: 1.90 $
* $Revision: 1.91 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
......@@ -197,8 +197,7 @@ cio_start (struct subchannel *sch, /* subchannel structure */
sch->orb.pfch = sch->options.prefetch == 0;
sch->orb.spnd = sch->options.suspend;
sch->orb.ssic = sch->options.suspend && sch->options.inter;
sch->orb.lpm = (lpm != 0) ? (lpm & sch->lpm) : sch->lpm;
sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_ARCH_S390X
/*
* for 64 bit we always support 64 bit IDAWs with 4k page size only
......
/*
* drivers/s390/cio/device.c
* bus driver for ccw devices
* $Revision: 1.45 $
* $Revision: 1.50 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
......@@ -215,6 +215,8 @@ online_show (struct device *dev, char *buf)
void
ccw_device_set_offline(struct ccw_device *cdev)
{
int ret;
if (!cdev)
return;
if (!cdev->online || !cdev->drv)
......@@ -226,23 +228,36 @@ ccw_device_set_offline(struct ccw_device *cdev)
cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
ccw_device_offline(cdev);
ret = ccw_device_offline(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else
//FIXME: we can't fail!
pr_debug("ccw_device_offline returned %d, device %s\n",
ret, cdev->dev.bus_id);
}
void
ccw_device_set_online(struct ccw_device *cdev)
{
if (!cdev || !cdev->handler)
int ret;
if (!cdev)
return;
if (cdev->online || !cdev->drv)
return;
spin_lock_irq(cdev->ccwlock);
ccw_device_online(cdev);
ret = ccw_device_online(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else {
pr_debug("ccw_device_online returned %d, device %s\n",
ret, cdev->dev.bus_id);
return;
}
if (cdev->private->state != DEV_STATE_ONLINE)
return;
if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
......@@ -250,9 +265,13 @@ ccw_device_set_online(struct ccw_device *cdev)
return;
}
spin_lock_irq(cdev->ccwlock);
ccw_device_offline(cdev);
ret = ccw_device_offline(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else
pr_debug("ccw_device_offline returned %d, device %s\n",
ret, cdev->dev.bus_id);
}
static ssize_t
......@@ -574,7 +593,7 @@ ccw_device_remove (struct device *dev)
* doubled code.
* This is safe because of the checks in ccw_device_set_offline.
*/
pr_debug(KERN_INFO "removing device %s, sch %d, devno %x\n",
pr_debug("removing device %s, sch %d, devno %x\n",
cdev->dev.name,
cdev->private->irq,
cdev->private->devno);
......
......@@ -136,7 +136,7 @@ VM_virtual_device_info (__u16 devno, struct senseid *ps)
ccode = diag210 (&diag_data);
ps->reserved = 0xff;
/* Special case for bloddy osa devices. */
/* Special case for bloody osa devices. */
if (diag_data.vrdcvcla == 0x02 &&
diag_data.vrdcvtyp == 0x20) {
ps->cu_type = 0x3088;
......
......@@ -48,7 +48,8 @@ ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
if (!cdev)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
cdev->private->state != DEV_STATE_W4SENSE &&
cdev->private->state != DEV_STATE_QDIO_ACTIVE)
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
if (!sch)
......@@ -122,6 +123,15 @@ ccw_device_call_handler(struct ccw_device *cdev)
{
struct subchannel *sch;
unsigned int stctl;
void (*handler)(struct ccw_device *, unsigned long, struct irb *);
if (cdev->private->state == DEV_STATE_QDIO_ACTIVE) {
if (cdev->private->qdio_data)
handler = cdev->private->qdio_data->handler;
else
handler = NULL;
} else
handler = cdev->handler;
sch = to_subchannel(cdev->dev.parent);
......@@ -144,13 +154,9 @@ ccw_device_call_handler(struct ccw_device *cdev)
/*
* Now we are ready to call the device driver interrupt handler.
*/
if (cdev->private->state == DEV_STATE_QDIO_ACTIVE) {
if (cdev->private->qdio_data &&
cdev->private->qdio_data->handler)
cdev->private->qdio_data->handler(cdev, sch->u_intparm,
&cdev->private->irb);
} else
cdev->handler (cdev, sch->u_intparm, &cdev->private->irb);
if (handler)
handler(cdev, sch->u_intparm, &cdev->private->irb);
/*
* Clear the old and now useless interrupt response block.
*/
......@@ -245,6 +251,8 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
wait_event(cdev->private->wait_q,
sch->schib.scsw.actl == 0);
spin_lock_irqsave(&sch->lock, flags);
/* FIXME: Check if we got sensible stuff. */
break;
}
}
/* Restore interrupt handler. */
......@@ -317,6 +325,8 @@ read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
spin_unlock_irqrestore(&sch->lock, flags);
wait_event(cdev->private->wait_q, sch->schib.scsw.actl == 0);
spin_lock_irqsave(&sch->lock, flags);
/* FIXME: Check if we got sensible stuff. */
break;
}
/* Restore interrupt handler. */
cdev->handler = handler;
......
......@@ -141,6 +141,8 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
struct subchannel *sch;
struct irb *irb;
int ret;
int opm;
int i;
irb = (struct irb *) __LC_IRB;
/* Ignore unsolicited interrupts. */
......@@ -154,6 +156,16 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
case 0: /* Sense Path Group ID successful. */
cdev->private->flags.pgid_supp = 1;
opm = sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom;
for (i=0;i<8;i++) {
if (opm == (0x80 << i)) {
/* Don't group single path devices. */
cdev->private->flags.pgid_supp = 0;
break;
}
}
if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET)
memcpy(&cdev->private->pgid, &global_pgid,
sizeof(struct pgid));
......
......@@ -62,8 +62,9 @@ ccw_device_path_notoper(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
stsch (sch->irq, &sch->schib);
CIO_MSG_EVENT(0, "cio_process_irq(%04X) - path(s) %02x are "
"not operational ", sch->irq, sch->schib.pmcw.pnom);
CIO_MSG_EVENT(0, "%s(%04X) - path(s) %02x are "
"not operational \n", __FUNCTION__, sch->irq,
sch->schib.pmcw.pnom);
sch->lpm &= ~sch->schib.pmcw.pnom;
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
......@@ -289,7 +290,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
sch = to_subchannel(cdev->dev.parent);
/* A sense is required, can we do it now ? */
if (irb->scsw.actl != 0)
if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
/*
* we received an Unit Check but we have no final
* status yet, therefore we must delay the SENSE
......@@ -348,7 +349,7 @@ int
ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
{
ccw_device_accumulate_irb(cdev, irb);
if (irb->scsw.actl != 0)
if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
return -EBUSY;
/* Check for basic sense. */
if (cdev->private->flags.dosense &&
......
#ifndef S390_CIO_IOASM_H
#define S390_CIO_IOASM_H
/*
* area for channel subsystem call
*/
struct chsc_area {
struct {
/* word 0 */
__u16 command_code1;
__u16 command_code2;
union {
struct {
/* word 1 */
__u32 reserved1;
/* word 2 */
__u32 reserved2;
} __attribute__ ((packed,aligned(8))) sei_req;
struct {
/* word 1 */
__u16 reserved1;
__u16 f_sch; /* first subchannel */
/* word 2 */
__u16 reserved2;
__u16 l_sch; /* last subchannel */
} __attribute__ ((packed,aligned(8))) ssd_req;
} request_block_data;
/* word 3 */
__u32 reserved3;
} __attribute__ ((packed,aligned(8))) request_block;
struct {
/* word 0 */
__u16 length;
__u16 response_code;
/* word 1 */
__u32 reserved1;
union {
struct {
/* word 2 */
__u8 flags;
__u8 vf; /* validity flags */
__u8 rs; /* reporting source */
__u8 cc; /* content code */
/* word 3 */
__u16 fla; /* full link address */
__u16 rsid; /* reporting source id */
/* word 4 */
__u32 reserved2;
/* word 5 */
__u32 reserved3;
/* word 6 */
__u32 ccdf; /* content-code dependent field */
/* word 7 */
__u32 reserved4;
/* word 8 */
__u32 reserved5;
/* word 9 */
__u32 reserved6;
} __attribute__ ((packed,aligned(8))) sei_res;
struct {
/* word 2 */
__u8 sch_valid : 1;
__u8 dev_valid : 1;
__u8 st : 3; /* subchannel type */
__u8 zeroes : 3;
__u8 unit_addr; /* unit address */
__u16 devno; /* device number */
/* word 3 */
__u8 path_mask;
__u8 fla_valid_mask;
__u16 sch; /* subchannel */
/* words 4-5 */
__u8 chpid[8]; /* chpids 0-7 */
/* words 6-9 */
__u16 fla[8]; /* full link addresses 0-7 */
} __attribute__ ((packed,aligned(8))) ssd_res;
} response_block_data;
} __attribute__ ((packed,aligned(8))) response_block;
} __attribute__ ((packed,aligned(PAGE_SIZE)));
/*
* TPI info structure
*/
......
This diff is collapsed.
#ifndef _CIO_QDIO_H
#define _CIO_QDIO_H
#define VERSION_CIO_QDIO_H "$Revision: 1.8 $"
#define VERSION_CIO_QDIO_H "$Revision: 1.11 $"
//#define QDIO_DBF_LIKE_HELL
......@@ -48,6 +48,10 @@
#define QDIO_STATS_CLASSES 2
#define QDIO_STATS_COUNT_NEEDED 2*/
#define QDIO_ACTIVATE_DELAY 5 /* according to brenton belmar and paul
gioquindo it can take up to 5ms before
queues are really active */
#define QDIO_NO_USE_COUNT_TIME 10
#define QDIO_NO_USE_COUNT_TIMEOUT 1000 /* wait for 1 sec on each q before
exiting without having use_count
......@@ -579,6 +583,7 @@ struct qdio_q {
int is_input_q;
int irq;
struct ccw_device *cdev;
unsigned int is_iqdio_q;
......@@ -670,7 +675,7 @@ struct qdio_irq {
unsigned int sync_done_on_outb_pcis;
unsigned int state;
spinlock_t setting_up_lock;
struct semaphore setting_up_sema;
unsigned int no_input_qs;
unsigned int no_output_qs;
......
......@@ -21,6 +21,7 @@
extern void css_process_crw(int);
extern void chsc_process_crw(void);
extern void chp_process_crw(int);
static void
s390_handle_damage(char *msg)
......@@ -53,29 +54,25 @@ s390_collect_crw_info(void)
crw.erc, crw.rsid);
switch (crw.rsc) {
case CRW_RSC_SCH:
pr_debug(KERN_NOTICE, "source is subchannel %04X\n",
crw.rsid);
pr_debug("source is subchannel %04X\n", crw.rsid);
css_process_crw (crw.rsid);
break;
case CRW_RSC_MONITOR:
pr_debug(KERN_NOTICE,
"source is monitoring facility\n");
pr_debug("source is monitoring facility\n");
break;
case CRW_RSC_CPATH:
pr_debug(KERN_NOTICE,
"source is channel path %02X\n",
pcrwe->crw.rsid);
pr_debug("source is channel path %02X\n", crw.rsid);
chp_process_crw(crw.rsid);
break;
case CRW_RSC_CONFIG:
pr_debug(KERN_NOTICE,
"source is configuration-alert facility\n");
pr_debug("source is configuration-alert facility\n");
break;
case CRW_RSC_CSS:
pr_debug(KERN_NOTICE, "source is channel subsystem\n");
pr_debug("source is channel subsystem\n");
chsc_process_crw();
break;
default:
pr_debug(KERN_NOTICE, "unknown source\n");
pr_debug("unknown source\n");
break;
}
} while (crw.chn);
......
......@@ -50,11 +50,11 @@ struct qdio_buffer{
} __attribute__ ((packed,aligned(256)));
/* params are: irq, status, qdio_error, siga_error,
/* params are: ccw_device, status, qdio_error, siga_error,
queue_number, first element processed, number of elements processed,
int_parm */
typedef void qdio_handler_t(int,unsigned int,unsigned int,unsigned int,
unsigned int,int,int,unsigned long);
typedef void qdio_handler_t(struct ccw_device *,unsigned int,unsigned int,
unsigned int,unsigned int,int,int,unsigned long);
#define QDIO_STATUS_INBOUND_INT 0x01
......@@ -100,6 +100,8 @@ struct qdio_initialize{
void **output_sbal_addr_array; /* addr of n*128 void ptrs */
};
extern int qdio_initialize(struct qdio_initialize *init_data);
extern int qdio_allocate(struct qdio_initialize *init_data);
extern int qdio_establish(struct ccw_device *);
extern int qdio_activate(struct ccw_device *,int flags);
......@@ -127,6 +129,8 @@ extern int qdio_synchronize(struct ccw_device*, unsigned int flags,
unsigned int queue_number);
extern int qdio_cleanup(struct ccw_device*, int how);
extern int qdio_shutdown(struct ccw_device*, int how);
extern int qdio_free(struct ccw_device*);
unsigned char qdio_get_slsb_state(struct ccw_device*, unsigned int flag,
unsigned int queue_number,
......
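For qdio users, the visible effect of this header change is that a queue handler now receives the owning struct ccw_device * rather than the irq number as its first argument. A minimal sketch of a driver callback matching the new qdio_handler_t (the handler name and body are purely illustrative):

/* Hypothetical callback with the new qdio_handler_t signature. */
static void
sample_qdio_handler(struct ccw_device *cdev, unsigned int status,
		    unsigned int qdio_error, unsigned int siga_error,
		    unsigned int queue_number, int first_element,
		    int count, unsigned long int_parm)
{
	if (status & QDIO_STATUS_INBOUND_INT) {
		/* Process buffers first_element .. first_element+count-1
		 * of inbound queue queue_number for this ccw device. */
	}
}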