Commit 3e2cc616 authored by Kai Germaschewski

ISDN/HiSax: Shared function for scheduling a B-channel event

There's no need for each hardware driver to implement its own
(short) xxx_sched_event().
parent 7db400e0
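For reference, the change boils down to the two snippets below, both taken from the hunks that follow (file names are not shown in this copy of the diff). Every B-channel driver used to carry its own near-identical copy of a helper like the first one (exact qualifiers vary slightly per driver), where xxx_ stands for the driver prefix (hdlc_, hscx_, hfc_, hfcpci_, hfcsx_, isar_, bch_, jade_, W6692B_, ...); the commit deletes those copies and points all callers at a single shared inline, which also updates the event mask with set_bit() instead of an open-coded OR.

/* Removed: one private copy per driver (xxx_ is a placeholder for the driver prefix) */
static void
xxx_sched_event(struct BCState *bcs, int event)
{
	bcs->event |= 1 << event;
	schedule_work(&bcs->work);
}

/* Added: one shared helper, used by all drivers */
static inline void
sched_b_event(struct BCState *bcs, int event)
{
	set_bit(event, &bcs->event);
	schedule_work(&bcs->work);
}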
......@@ -191,13 +191,6 @@ struct BCState *Sel_BCS(struct IsdnCardState *cs, int channel)
return(NULL);
}
void inline
hdlc_sched_event(struct BCState *bcs, int event)
{
bcs->event |= 1 << event;
schedule_work(&bcs->work);
}
void
write_ctrl(struct BCState *bcs, int which) {
......@@ -252,7 +245,7 @@ modehdlc(struct BCState *bcs, int mode, int bc)
bcs->hw.hdlc.ctrl.sr.cmd = HDLC_CMD_XRS;
write_ctrl(bcs, 1);
bcs->hw.hdlc.ctrl.sr.cmd = 0;
hdlc_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
break;
case (L1_MODE_HDLC):
bcs->mode = mode;
......@@ -263,7 +256,7 @@ modehdlc(struct BCState *bcs, int mode, int bc)
bcs->hw.hdlc.ctrl.sr.cmd = HDLC_CMD_XRS;
write_ctrl(bcs, 1);
bcs->hw.hdlc.ctrl.sr.cmd = 0;
hdlc_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
break;
}
}
......@@ -425,7 +418,7 @@ HDLC_irq(struct BCState *bcs, u_int stat) {
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->hw.hdlc.rcvidx = 0;
hdlc_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
} else {
if (bcs->cs->debug & L1_DEB_HSCX)
debugl1(bcs->cs, "invalid frame");
......@@ -444,7 +437,6 @@ HDLC_irq(struct BCState *bcs, u_int stat) {
skb_push(bcs->tx_skb, bcs->hw.hdlc.count);
bcs->tx_cnt += bcs->hw.hdlc.count;
bcs->hw.hdlc.count = 0;
// hdlc_sched_event(bcs, B_XMTBUFREADY);
if (bcs->cs->debug & L1_DEB_WARN)
debugl1(bcs->cs, "ch%d XDU", bcs->channel);
} else if (bcs->cs->debug & L1_DEB_WARN)
......@@ -462,7 +454,7 @@ HDLC_irq(struct BCState *bcs, u_int stat) {
return;
}
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
hdlc_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
bcs->hw.hdlc.count = 0;
}
if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
......@@ -471,7 +463,7 @@ HDLC_irq(struct BCState *bcs, u_int stat) {
hdlc_fill_fifo(bcs);
} else {
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
hdlc_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
}
......
......@@ -1825,12 +1825,6 @@ static void hisax_bh(void *data)
}
}
static void hisax_b_sched_event(struct BCState *bcs, int event)
{
bcs->event |= 1 << event;
schedule_work(&bcs->work);
}
static inline void D_L2L1(struct hisax_d_if *d_if, int pr, void *arg)
{
struct hisax_if *ifc = (struct hisax_if *) d_if;
......@@ -1907,13 +1901,13 @@ static void hisax_b_l1l2(struct hisax_if *ifc, int pr, void *arg)
break;
case PH_DATA | INDICATION:
skb_queue_tail(&bcs->rqueue, arg);
hisax_b_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
break;
case PH_DATA | CONFIRM:
skb = arg;
bcs->tx_cnt -= skb->truesize;
skb_queue_tail(&bcs->cmpl_queue, skb);
hisax_b_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
skb = skb_dequeue(&bcs->squeue);
if (skb) {
B_L2L1(b_if, PH_DATA | REQUEST, skb);
......
......@@ -542,7 +542,7 @@ Memhscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
}
}
bcs->hw.hscx.rcvidx = 0;
hscx_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
if (val & 0x40) { /* RPF */
Memhscx_empty_fifo(bcs, fifo_size);
......@@ -555,7 +555,7 @@ Memhscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->hw.hscx.rcvidx = 0;
hscx_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
}
if (val & 0x10) { /* XPR */
......@@ -565,7 +565,7 @@ Memhscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
return;
}
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
hscx_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
bcs->hw.hscx.count = 0;
}
if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
......@@ -574,7 +574,7 @@ Memhscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
Memhscx_fill_fifo(bcs);
} else {
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
hscx_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
}
......
......@@ -301,7 +301,7 @@ modem_fill(struct BCState *bcs) {
return;
}
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
hscx_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
}
if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
bcs->hw.hscx.count = 0;
......@@ -309,7 +309,7 @@ modem_fill(struct BCState *bcs) {
write_modem(bcs);
} else {
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
hscx_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
......@@ -344,7 +344,7 @@ static inline void receive_chars(struct IsdnCardState *cs,
cs->hw.elsa.rcvcnt);
skb_queue_tail(& cs->hw.elsa.bcs->rqueue, skb);
}
hscx_sched_event(cs->hw.elsa.bcs, B_RCVBUFREADY);
sched_b_event(cs->hw.elsa.bcs, B_RCVBUFREADY);
} else {
char tmp[128];
char *t = tmp;
......
......@@ -198,13 +198,6 @@ ReadZReg(struct IsdnCardState *cs, u_char reg)
return (val);
}
static void
hfc_sched_event(struct BCState *bcs, int event)
{
bcs->event |= 1 << event;
schedule_work(&bcs->work);
}
static struct sk_buff
*hfc_empty_fifo(struct BCState *bcs, int count)
{
......@@ -368,7 +361,7 @@ hfc_fill_fifo(struct BCState *bcs)
} else {
bcs->tx_cnt -= bcs->tx_skb->len;
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
hscx_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
bcs->tx_skb = NULL;
}
WaitForBusy(cs);
......@@ -440,7 +433,7 @@ main_rec_2bds0(struct BCState *bcs)
cli();
skb_queue_tail(&bcs->rqueue, skb);
sti();
hfc_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
rcnt = f1 -f2;
if (rcnt<0)
......@@ -924,7 +917,7 @@ hfc2bds0_interrupt(struct IsdnCardState *cs, u_char val)
} else
debugl1(cs,"fill_data %d blocked", bcs->channel);
} else {
hfc_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
}
......@@ -948,7 +941,7 @@ hfc2bds0_interrupt(struct IsdnCardState *cs, u_char val)
} else
debugl1(cs,"fill_data %d blocked", bcs->channel);
} else {
hfc_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
}
......
......@@ -82,13 +82,6 @@ ReadZReg(struct BCState *bcs, u_char reg)
return (val);
}
void
hfc_sched_event(struct BCState *bcs, int event)
{
bcs->event |= 1 << event;
schedule_work(&bcs->work);
}
static void
hfc_clear_fifo(struct BCState *bcs)
{
......@@ -323,7 +316,7 @@ hfc_fill_fifo(struct BCState *bcs)
count = -1;
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
hscx_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
bcs->tx_skb = NULL;
if (bcs->mode != L1_MODE_TRANS) {
WaitForBusy(cs);
......@@ -384,7 +377,7 @@ main_irq_hfc(struct BCState *bcs)
/* sti(); */
if ((skb = hfc_empty_fifo(bcs, rcnt))) {
skb_queue_tail(&bcs->rqueue, skb);
hfc_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
}
receive = 1;
......@@ -407,7 +400,7 @@ main_irq_hfc(struct BCState *bcs)
transmit = 0;
} else {
transmit = 0;
hfc_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
restore_flags(flags);
......
......@@ -197,16 +197,6 @@ sched_event_D_pci(struct IsdnCardState *cs, int event)
schedule_work(&cs->work);
}
/*********************************/
/* schedule a new b_channel task */
/*********************************/
static void
hfcpci_sched_event(struct BCState *bcs, int event)
{
bcs->event |= 1 << event;
schedule_work(&bcs->work);
}
/************************************************/
/* select a b-channel entry matching and active */
/************************************************/
......@@ -454,7 +444,7 @@ hfcpci_empty_fifo_trans(struct BCState *bcs, bzfifo_type * bz, u_char * bdata)
cli();
skb_queue_tail(&bcs->rqueue, skb);
sti();
hfcpci_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
*z2r = cpu_to_le16(new_z2); /* new position */
......@@ -513,7 +503,7 @@ main_rec_hfcpci(struct BCState *bcs)
cli();
skb_queue_tail(&bcs->rqueue, skb);
sti();
hfcpci_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
rcnt = bz->f1 - bz->f2;
if (rcnt < 0)
......@@ -743,7 +733,7 @@ hfcpci_fill_fifo(struct BCState *bcs)
}
bcs->tx_cnt -= bcs->tx_skb->len;
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
hscx_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
cli();
bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
......@@ -1055,7 +1045,7 @@ hfcpci_interrupt(int intno, void *dev_id, struct pt_regs *regs)
} else
debugl1(cs, "fill_data %d blocked", bcs->channel);
} else {
hfcpci_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
}
......@@ -1079,7 +1069,7 @@ hfcpci_interrupt(int intno, void *dev_id, struct pt_regs *regs)
} else
debugl1(cs, "fill_data %d blocked", bcs->channel);
} else {
hfcpci_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
}
......
......@@ -467,16 +467,6 @@ sched_event_D_sx(struct IsdnCardState *cs, int event)
schedule_work(&cs->work);
}
/*********************************/
/* schedule a new b_channel task */
/*********************************/
static void
hfcsx_sched_event(struct BCState *bcs, int event)
{
bcs->event |= 1 << event;
schedule_work(&bcs->work);
}
/************************************************/
/* select a b-channel entry matching and active */
/************************************************/
......@@ -550,7 +540,7 @@ main_rec_hfcsx(struct BCState *bcs)
cli();
skb_queue_tail(&bcs->rqueue, skb);
sti();
hfcsx_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
......@@ -603,7 +593,7 @@ hfcsx_fill_fifo(struct BCState *bcs)
bcs->tx_cnt -= bcs->tx_skb->len;
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
hscx_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
bcs->tx_skb = NULL;
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
}
......@@ -854,7 +844,7 @@ hfcsx_interrupt(int intno, void *dev_id, struct pt_regs *regs)
} else
debugl1(cs, "fill_data %d blocked", bcs->channel);
} else {
hfcsx_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
}
......@@ -878,7 +868,7 @@ hfcsx_interrupt(int intno, void *dev_id, struct pt_regs *regs)
} else
debugl1(cs, "fill_data %d blocked", bcs->channel);
} else {
hfcsx_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
}
......
......@@ -1407,3 +1407,9 @@ L4L3(struct PStack *st, int pr, void *arg)
st->l3.l4l3(st, pr, arg);
}
static inline void
sched_b_event(struct BCState *bcs, int event)
{
set_bit(event, &bcs->event);
schedule_work(&bcs->work);
}
......@@ -91,13 +91,6 @@ modehscx(struct BCState *bcs, int mode, int bc)
cs->BC_Write_Reg(cs, hscx, HSCX_ISTA, 0x00);
}
void
hscx_sched_event(struct BCState *bcs, int event)
{
bcs->event |= 1 << event;
schedule_work(&bcs->work);
}
void
hscx_l2l1(struct PStack *st, int pr, void *arg)
{
......
......@@ -35,6 +35,5 @@
#define HSCX_MASK 0x20
extern int HscxVersion(struct IsdnCardState *cs, char *s);
extern void hscx_sched_event(struct BCState *bcs, int event);
extern void modehscx(struct BCState *bcs, int mode, int bc);
extern void inithscxisac(struct IsdnCardState *cs);
......@@ -185,7 +185,7 @@ hscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
}
}
bcs->hw.hscx.rcvidx = 0;
hscx_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
if (val & 0x40) { /* RPF */
hscx_empty_fifo(bcs, fifo_size);
......@@ -198,7 +198,7 @@ hscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->hw.hscx.rcvidx = 0;
hscx_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
}
if (val & 0x10) { /* XPR */
......@@ -208,7 +208,7 @@ hscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
return;
}
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
hscx_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
bcs->hw.hscx.count = 0;
}
if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
......@@ -217,7 +217,7 @@ hscx_interrupt(struct IsdnCardState *cs, u_char val, u_char hscx)
__hscx_fill_fifo(bcs);
} else {
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
hscx_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
}
......
......@@ -46,7 +46,6 @@ static inline void dch_int(struct IsdnCardState *cs);
static void __devinit dch_setstack(struct PStack *st, struct IsdnCardState *cs);
static void __devinit dch_init(struct IsdnCardState *cs);
static void bch_l2l1(struct PStack *st, int pr, void *arg);
static void bch_sched_event(struct BCState *bcs, int event);
static void bch_empty_fifo(struct BCState *bcs, int count);
static void bch_fill_fifo(struct BCState *bcs);
static void bch_int(struct IsdnCardState *cs, u_char hscx);
......@@ -583,16 +582,6 @@ bch_l2l1(struct PStack *st, int pr, void *arg)
}
}
//----------------------------------------------------------
// proceed with bottom half handler BChannel_bh()
//----------------------------------------------------------
static void
bch_sched_event(struct BCState *bcs, int event)
{
bcs->event |= 1 << event;
schedule_work(&bcs->work);
}
//----------------------------------------------------------
// Read B channel fifo to receive buffer
//----------------------------------------------------------
......@@ -730,7 +719,7 @@ bch_int(struct IsdnCardState *cs, u_char hscx)
}
}
bcs->hw.hscx.rcvidx = 0;
bch_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
if (istab &0x40) { // RPF
......@@ -745,7 +734,7 @@ bch_int(struct IsdnCardState *cs, u_char hscx)
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->hw.hscx.rcvidx = 0;
bch_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
}
......@@ -762,7 +751,7 @@ bch_int(struct IsdnCardState *cs, u_char hscx)
goto afterXPR;
}
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
bch_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
bcs->hw.hscx.count = 0;
}
if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
......@@ -771,7 +760,7 @@ bch_int(struct IsdnCardState *cs, u_char hscx)
bch_fill_fifo(bcs);
} else {
clear_bit(BC_FLG_BUSY, &bcs->Flag);
bch_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
afterXPR:
......
......@@ -424,9 +424,6 @@ isar_load_firmware(struct IsdnCardState *cs, u_char *buf)
}
extern void BChannel_bh(struct BCState *);
#define B_LL_NOCARRIER 8
#define B_LL_CONNECT 9
#define B_LL_OK 10
static void
isar_bh(void *data)
......@@ -442,13 +439,6 @@ isar_bh(void *data)
ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_OK);
}
static void
isar_sched_event(struct BCState *bcs, int event)
{
bcs->event |= 1 << event;
schedule_work(&bcs->work);
}
static inline void
send_DLE_ETX(struct BCState *bcs)
{
......@@ -458,7 +448,7 @@ send_DLE_ETX(struct BCState *bcs)
if ((skb = dev_alloc_skb(2))) {
memcpy(skb_put(skb, 2), dleetx, 2);
skb_queue_tail(&bcs->rqueue, skb);
isar_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
} else {
printk(KERN_WARNING "HiSax: skb out of memory\n");
}
......@@ -510,7 +500,7 @@ isar_rcv_frame(struct IsdnCardState *cs, struct BCState *bcs)
if ((skb = dev_alloc_skb(ireg->clsb))) {
rcv_mbox(cs, ireg, (u_char *)skb_put(skb, ireg->clsb));
skb_queue_tail(&bcs->rqueue, skb);
isar_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
} else {
printk(KERN_WARNING "HiSax: skb out of memory\n");
cs->BC_Write_Reg(cs, 1, ISAR_IIA, 0);
......@@ -551,7 +541,7 @@ isar_rcv_frame(struct IsdnCardState *cs, struct BCState *bcs)
memcpy(skb_put(skb, bcs->hw.isar.rcvidx-2),
bcs->hw.isar.rcvbuf, bcs->hw.isar.rcvidx-2);
skb_queue_tail(&bcs->rqueue, skb);
isar_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
bcs->hw.isar.rcvidx = 0;
}
......@@ -576,7 +566,7 @@ isar_rcv_frame(struct IsdnCardState *cs, struct BCState *bcs)
insert_dle((u_char *)skb_put(skb, bcs->hw.isar.rcvidx),
bcs->hw.isar.rcvbuf, ireg->clsb);
skb_queue_tail(&bcs->rqueue, skb);
isar_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
if (ireg->cmsb & SART_NMD) { /* ABORT */
if (cs->debug & L1_DEB_WARN)
debugl1(cs, "isar_rcv_frame: no more data");
......@@ -587,7 +577,7 @@ isar_rcv_frame(struct IsdnCardState *cs, struct BCState *bcs)
ISAR_HIS_PUMPCTRL, PCTRL_CMD_ESC,
0, NULL);
bcs->hw.isar.state = STFAX_ESCAPE;
isar_sched_event(bcs, B_LL_NOCARRIER);
sched_b_event(bcs, B_LL_NOCARRIER);
}
} else {
printk(KERN_WARNING "HiSax: skb out of memory\n");
......@@ -635,9 +625,9 @@ isar_rcv_frame(struct IsdnCardState *cs, struct BCState *bcs)
bcs->hw.isar.rcvbuf,
bcs->hw.isar.rcvidx);
skb_queue_tail(&bcs->rqueue, skb);
isar_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
send_DLE_ETX(bcs);
isar_sched_event(bcs, B_LL_OK);
sched_b_event(bcs, B_LL_OK);
}
bcs->hw.isar.rcvidx = 0;
}
......@@ -651,7 +641,7 @@ isar_rcv_frame(struct IsdnCardState *cs, struct BCState *bcs)
sendmsg(cs, SET_DPS(bcs->hw.isar.dpath) |
ISAR_HIS_PUMPCTRL, PCTRL_CMD_ESC, 0, NULL);
bcs->hw.isar.state = STFAX_ESCAPE;
isar_sched_event(bcs, B_LL_NOCARRIER);
sched_b_event(bcs, B_LL_NOCARRIER);
}
break;
default:
......@@ -761,7 +751,7 @@ send_frames(struct BCState *bcs)
return;
} else {
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
isar_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
if (bcs->mode == L1_MODE_FAX) {
if (bcs->hw.isar.cmd == PCTRL_CMD_FTH) {
if (test_bit(BC_FLG_LASTDATA, &bcs->Flag)) {
......@@ -792,11 +782,11 @@ send_frames(struct BCState *bcs)
}
test_and_set_bit(BC_FLG_LL_OK, &bcs->Flag);
} else {
isar_sched_event(bcs, B_LL_CONNECT);
sched_b_event(bcs, B_LL_CONNECT);
}
}
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
isar_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
......@@ -1055,7 +1045,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) {
&bcs->Flag);
add_timer(&bcs->hw.isar.ftimer);
} else {
isar_sched_event(bcs, B_LL_CONNECT);
sched_b_event(bcs, B_LL_CONNECT);
}
} else {
if (cs->debug & L1_DEB_WARN)
......@@ -1100,10 +1090,10 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) {
}
} else if (bcs->hw.isar.state == STFAX_ACTIV) {
if (test_and_clear_bit(BC_FLG_LL_OK, &bcs->Flag)) {
isar_sched_event(bcs, B_LL_OK);
sched_b_event(bcs, B_LL_OK);
} else if (bcs->hw.isar.cmd == PCTRL_CMD_FRM) {
send_DLE_ETX(bcs);
isar_sched_event(bcs, B_LL_NOCARRIER);
sched_b_event(bcs, B_LL_NOCARRIER);
} else {
ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_FCERROR);
}
......@@ -1259,7 +1249,7 @@ ftimer_handler(struct BCState *bcs) {
bcs->Flag);
test_and_clear_bit(BC_FLG_FTI_RUN, &bcs->Flag);
if (test_and_clear_bit(BC_FLG_LL_CONN, &bcs->Flag)) {
isar_sched_event(bcs, B_LL_CONNECT);
sched_b_event(bcs, B_LL_CONNECT);
}
}
......
......@@ -17,9 +17,13 @@
#define D_TX_MON1 7
#define E_RCVBUFREADY 8
#define B_RCVBUFREADY 0
#define B_XMTBUFREADY 1
#define B_CMPLREADY 2
#define B_RCVBUFREADY 0
#define B_XMTBUFREADY 1
#define B_CMPLREADY 2
#define B_LL_NOCARRIER 8
#define B_LL_CONNECT 9
#define B_LL_OK 10
extern void debugl1(struct IsdnCardState *cs, char *fmt, ...);
extern void DChannel_proc_xmt(struct IsdnCardState *cs);
......
......@@ -164,7 +164,7 @@ jade_interrupt(struct IsdnCardState *cs, u_char val, u_char jade)
}
}
bcs->hw.hscx.rcvidx = 0;
jade_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
if (val & 0x40) { /* RPF */
jade_empty_fifo(bcs, fifo_size);
......@@ -177,7 +177,7 @@ jade_interrupt(struct IsdnCardState *cs, u_char val, u_char jade)
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->hw.hscx.rcvidx = 0;
jade_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
}
if (val & 0x10) { /* XPR */
......@@ -187,7 +187,7 @@ jade_interrupt(struct IsdnCardState *cs, u_char val, u_char jade)
return;
}
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
jade_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
bcs->hw.hscx.count = 0;
}
if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
......@@ -196,7 +196,7 @@ jade_interrupt(struct IsdnCardState *cs, u_char val, u_char jade)
jade_fill_fifo(bcs);
} else {
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
jade_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
}
......
......@@ -432,8 +432,7 @@ static void got_frame(struct BCState *bcs, int count) {
memcpy(skb_put(skb, count), bcs->hw.tiger.rcvbuf, count);
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->event |= 1 << B_RCVBUFREADY;
schedule_work(&bcs->work);
sched_b_event(bcs, B_RCVBUFREADY);
if (bcs->cs->debug & L1_DEB_RECEIVE_FRAME)
printframe(bcs->cs, bcs->hw.tiger.rcvbuf, count, "rec");
......@@ -761,7 +760,7 @@ static void write_raw(struct BCState *bcs, u_int *buf, int cnt) {
debugl1(bcs->cs,"tiger write_raw: NULL skb s_cnt %d", s_cnt);
} else {
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
hscx_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
bcs->tx_skb = NULL;
}
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
......@@ -786,8 +785,7 @@ static void write_raw(struct BCState *bcs, u_int *buf, int cnt) {
debugl1(bcs->cs, "tiger write_raw: fill rest %d",
cnt - s_cnt);
}
bcs->event |= 1 << B_XMTBUFREADY;
schedule_work(&bcs->work);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
} else if (test_and_clear_bit(BC_FLG_NOFRAME, &bcs->Flag)) {
......
......@@ -83,10 +83,6 @@ static void usb_b_out(struct st5481_bcs *bcs,int buf_nr)
// Frame sent
b_out->tx_skb = NULL;
B_L1L2(bcs, PH_DATA | CONFIRM, skb);
/* if (!(bcs->tx_skb = skb_dequeue(&bcs->sq))) { */
/* st5481B_sched_event(bcs, B_XMTBUFREADY); */
/* } */
}
} else {
if (bcs->mode == L1_MODE_TRANS) {
......
......@@ -141,13 +141,6 @@ W6692_sched_event(struct IsdnCardState *cs, int event)
schedule_work(&cs->work);
}
static void
W6692B_sched_event(struct BCState *bcs, int event)
{
bcs->event |= 1 << event;
schedule_work(&bcs->work);
}
static void
W6692_empty_fifo(struct IsdnCardState *cs, int count)
{
......@@ -345,7 +338,7 @@ W6692B_interrupt(struct IsdnCardState *cs, u_char bchan)
}
}
bcs->hw.w6692.rcvidx = 0;
W6692B_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
if (val & W_B_EXI_RMR) { /* RMR */
W6692B_empty_fifo(bcs, W_B_FIFO_THRESH);
......@@ -358,7 +351,7 @@ W6692B_interrupt(struct IsdnCardState *cs, u_char bchan)
skb_queue_tail(&bcs->rqueue, skb);
}
bcs->hw.w6692.rcvidx = 0;
W6692B_sched_event(bcs, B_RCVBUFREADY);
sched_b_event(bcs, B_RCVBUFREADY);
}
}
if (val & W_B_EXI_XFR) { /* XFR */
......@@ -368,7 +361,7 @@ W6692B_interrupt(struct IsdnCardState *cs, u_char bchan)
return;
}
skb_queue_tail(&bcs->cmpl_queue, bcs->tx_skb);
W6692B_sched_event(bcs, B_CMPLREADY);
sched_b_event(bcs, B_CMPLREADY);
bcs->hw.w6692.count = 0;
}
if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
......@@ -377,7 +370,7 @@ W6692B_interrupt(struct IsdnCardState *cs, u_char bchan)
W6692B_fill_fifo(bcs);
} else {
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
W6692B_sched_event(bcs, B_XMTBUFREADY);
sched_b_event(bcs, B_XMTBUFREADY);
}
}
if (val & W_B_EXI_XDUN) { /* XDUN */
......