Commit 4ff8e934 authored by David Brownell's avatar David Brownell Committed by Greg Kroah-Hartman

[PATCH] ohci unlink cleanups

Attached is a patch that cleans up a few more issues in the OHCI unlink
code.

There may still be an ISO-IN data problem, I'll look at that separately
since it seems unrelated to unlink issues.

- Simplify/correct ED lifecycle
	* UNLINK is now for real: descheduled and waiting for SOF
	* finish_unlinks() expects descheduled EDs (may reschedule)
	* only ed_deschedule() turns off hardware schedule processing
	* no more NEW state
	* no more ED_URB_DEL flag (it added extra states)
	* new IDLE state, "not scheduled" (replaces previous UNLINKing)
- Bugfixes
	* ed_get(), potential memleak is now gone
	* urb_enqueue(), won't submit to dead/sleeping hc
	* free_config(), rescans after SOF when needed
	* ed_schedule(), use wmb()
	* ed_schedule() and finish_unlinks(), more thorough about
	  restarting control or bulk processing
	* finish_unlinks(), more cautious about reentering
- General:
	* ed->ed_rm_list renamed ed_next; to be used more later
	* slightly shrink object code
	* rename some functions

This leaves one notable issue in the unlink paths:  the driver never waits
for SOF after descheduling (empty) EDs.  That's racy in most cases, though
there are a few light-traffic cases where that's correct (in part because
the ED is empty).  Easy to fix once the rest of this is known to behave.
parent 48a7ed7b
......@@ -10,8 +10,15 @@
* [ (C) Copyright 1999 Gregory P. Smith]
*
*
* OHCI is the main "non-Intel/VIA" standard for USB 1.1 host controller
* interfaces (though some non-x86 Intel chips use it). It supports
* smarter hardware than UHCI. A download link for the spec is available
* through the http://www.usb.org website.
*
* History:
*
* 2002/07/19 fixes to management of ED and schedule state.
* 2002/06/09 SA-1111 support (Christopher Hoover)
* 2002/06/01 remember frame when HC won't see EDs any more; use that info
* to fix urb unlink races caused by interrupt latency assumptions;
* minor ED field and function naming updates
......@@ -95,12 +102,12 @@
/*
* TO DO:
*
* - "disabled" should be the hcd state
* - "disabled" and "sleeping" should be in hcd->state
* - bandwidth alloc to generic code
* - lots more testing!!
*/
#define DRIVER_VERSION "2002-Jun-15"
#define DRIVER_VERSION "2002-Jul-19"
#define DRIVER_AUTHOR "Roman Weissgaerber <weissg@vienna.at>, David Brownell"
#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
......@@ -140,6 +147,7 @@ static int ohci_urb_enqueue (
int i, size = 0;
unsigned long flags;
int bustime = 0;
int retval = 0;
#ifdef OHCI_VERBOSE_DEBUG
urb_print (urb, "SUB", usb_pipein (pipe));
......@@ -191,19 +199,25 @@ static int ohci_urb_enqueue (
return -ENOMEM;
memset (urb_priv, 0, sizeof (urb_priv_t) + size * sizeof (struct td *));
spin_lock_irqsave (&ohci->lock, flags);
/* don't submit to a dead HC */
if (ohci->disabled || ohci->sleeping) {
retval = -ENODEV;
goto fail;
}
/* fill the private part of the URB */
urb_priv->length = size;
urb_priv->ed = ed;
/* allocate the TDs (updating hash chains) */
spin_lock_irqsave (&ohci->lock, flags);
for (i = 0; i < size; i++) {
urb_priv->td [i] = td_alloc (ohci, SLAB_ATOMIC);
if (!urb_priv->td [i]) {
urb_priv->length = i;
urb_free_priv (ohci, urb_priv);
spin_unlock_irqrestore (&ohci->lock, flags);
return -ENOMEM;
retval = -ENOMEM;
goto fail;
}
}
......@@ -217,11 +231,11 @@ static int ohci_urb_enqueue (
switch (usb_pipetype (pipe)) {
case PIPE_ISOCHRONOUS:
if (urb->transfer_flags & USB_ISO_ASAP) {
urb->start_frame = ( (ed->state == ED_OPER)
urb->start_frame = ((ed->state != ED_IDLE)
? (ed->intriso.last_iso + 1)
: (le16_to_cpu (ohci->hcca->frame_no)
+ 10)) & 0xffff;
}
}
/* FALLTHROUGH */
case PIPE_INTERRUPT:
if (urb->bandwidth == 0) {
......@@ -238,18 +252,20 @@ static int ohci_urb_enqueue (
urb->hcpriv = urb_priv;
/* link the ed into a chain if is not already */
if (ed->state != ED_OPER)
ep_link (ohci, ed);
/* schedule the ed if needed */
if (ed->state == ED_IDLE)
ed_schedule (ohci, ed);
/* fill the TDs and link them to the ed; and
* enable that part of the schedule, if needed
*/
td_submit_urb (urb);
fail:
if (retval)
urb_free_priv (ohci, urb_priv);
spin_unlock_irqrestore (&ohci->lock, flags);
return 0;
return retval;
}
/*
......@@ -270,19 +286,17 @@ static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
if (!ohci->disabled) {
urb_priv_t *urb_priv;
/* flag the urb's data for deletion in some upcoming
* SF interrupt's delete list processing
/* Unless an IRQ completed the unlink while it was being
* handed to us, flag it for unlink and giveback, and force
* some upcoming INTR_SF to call finish_unlinks()
*/
spin_lock_irqsave (&ohci->lock, flags);
urb_priv = urb->hcpriv;
if (!urb_priv || (urb_priv->state == URB_DEL)) {
spin_unlock_irqrestore (&ohci->lock, flags);
return 0;
if (urb_priv) {
urb_priv->state = URB_DEL;
if (urb_priv->ed->state == ED_OPER)
start_urb_unlink (ohci, urb_priv->ed);
}
urb_priv->state = URB_DEL;
start_urb_unlink (ohci, urb_priv->ed);
spin_unlock_irqrestore (&ohci->lock, flags);
} else {
/*
......@@ -290,12 +304,16 @@ static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
* any more ... just clean up every urb's memory.
*/
finish_urb (ohci, urb);
}
}
return 0;
}
/*-------------------------------------------------------------------------*/
/* frees config/altsetting state for endpoints,
* including ED memory, dummy TD, and bulk/intr data toggle
*/
static void
ohci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
{
......@@ -303,7 +321,11 @@ ohci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
struct hcd_dev *dev = (struct hcd_dev *) udev->hcpriv;
int i;
unsigned long flags;
#ifdef DEBUG
int rescans = 0;
#endif
rescan:
/* free any eds, and dummy tds, still hanging around */
spin_lock_irqsave (&ohci->lock, flags);
for (i = 0; i < 32; i++) {
......@@ -312,27 +334,47 @@ ohci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
if (!ed)
continue;
ed->state &= ~ED_URB_DEL;
if (ohci->disabled && ed->state == ED_OPER)
ed->state = ED_UNLINK;
if (ohci->disabled && ed->state != ED_IDLE)
ed->state = ED_IDLE;
switch (ed->state) {
case ED_NEW:
break;
case ED_UNLINK:
case ED_UNLINK: /* wait a frame? */
goto do_rescan;
case ED_IDLE: /* fully unlinked */
td_free (ohci, ed->dummy);
break;
case ED_OPER:
default:
#ifdef DEBUG
err ("illegal ED %d state in free_config, %d",
i, ed->state);
#ifdef DEBUG
BUG ();
#endif
/* ED_OPER: some driver disconnect() is broken,
* it didn't even start its unlinks much less wait
* for their completions.
* OTHERWISE: hcd bug, ed is garbage
*/
BUG ();
}
ed_free (ohci, ed);
}
spin_unlock_irqrestore (&ohci->lock, flags);
return;
do_rescan:
#ifdef DEBUG
/* a driver->disconnect() returned before its unlinks completed? */
if (in_interrupt ()) {
dbg ("WARNING: spin in interrupt; driver->disconnect() bug");
dbg ("dev usb-%s-%s ep 0x%x",
ohci->hcd.self.bus_name, udev->devpath, i);
}
BUG_ON (!(readl (&ohci->regs->intrenable) & OHCI_INTR_SF));
BUG_ON (rescans >= 2); /* HWBUG */
rescans++;
#endif
spin_unlock_irqrestore (&ohci->lock, flags);
wait_ms (1);
goto rescan;
}
static int ohci_get_frame (struct usb_hcd *hcd)
......
......@@ -170,50 +170,50 @@ static int ep_rev (int num_bits, int word)
/* link an ed into one of the HC chains */
static int ep_link (struct ohci_hcd *ohci, struct ed *edi)
static void ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
int int_branch, i;
int inter, interval, load;
__u32 *ed_p;
volatile struct ed *ed = edi;
ed->state = ED_OPER;
ed->hwNextED = 0;
wmb ();
/* we care about rm_list when setting CLE/BLE in case the HC was at
* work on some TD when CLE/BLE was turned off, and isn't quiesced
* yet. finish_unlinks() restarts as needed, some upcoming INTR_SF.
*/
switch (ed->type) {
case PIPE_CONTROL:
ed->hwNextED = 0;
if (ohci->ed_controltail == NULL) {
writel (ed->dma, &ohci->regs->ed_controlhead);
} else {
ohci->ed_controltail->hwNextED = cpu_to_le32 (ed->dma);
}
ed->ed_prev = ohci->ed_controltail;
if (!ohci->ed_controltail
&& !ohci->ed_rm_list
&& !ohci->sleeping
) {
if (!ohci->ed_controltail && !ohci->ed_rm_list) {
ohci->hc_control |= OHCI_CTRL_CLE;
writel (0, &ohci->regs->ed_controlcurrent);
writel (ohci->hc_control, &ohci->regs->control);
}
ohci->ed_controltail = edi;
ohci->ed_controltail = ed;
break;
case PIPE_BULK:
ed->hwNextED = 0;
if (ohci->ed_bulktail == NULL) {
writel (ed->dma, &ohci->regs->ed_bulkhead);
} else {
ohci->ed_bulktail->hwNextED = cpu_to_le32 (ed->dma);
}
ed->ed_prev = ohci->ed_bulktail;
if (!ohci->ed_bulktail
&& !ohci->ed_rm_list
&& !ohci->sleeping
) {
if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
ohci->hc_control |= OHCI_CTRL_BLE;
writel (0, &ohci->regs->ed_bulkcurrent);
writel (ohci->hc_control, &ohci->regs->control);
}
ohci->ed_bulktail = edi;
ohci->ed_bulktail = ed;
break;
case PIPE_INTERRUPT:
......@@ -231,17 +231,16 @@ static int ep_link (struct ohci_hcd *ohci, struct ed *edi)
ed->hwNextED = *ed_p;
*ed_p = cpu_to_le32 (ed->dma);
}
wmb ();
#ifdef OHCI_VERBOSE_DEBUG
ohci_dump_periodic (ohci, "LINK_INT");
#endif
break;
case PIPE_ISOCHRONOUS:
ed->hwNextED = 0;
ed->interval = 1;
ed->ed_prev = ohci->ed_isotail;
if (ohci->ed_isotail != NULL) {
ohci->ed_isotail->hwNextED = cpu_to_le32 (ed->dma);
ed->ed_prev = ohci->ed_isotail;
} else {
for ( i = 0; i < NUM_INTS; i += inter) {
inter = 1;
......@@ -251,15 +250,18 @@ static int ep_link (struct ohci_hcd *ohci, struct ed *edi)
inter = ep_rev (6, (dma_to_ed (ohci, le32_to_cpup (ed_p)))->interval);
*ed_p = cpu_to_le32 (ed->dma);
}
ed->ed_prev = NULL;
}
ohci->ed_isotail = edi;
wmb ();
ohci->ed_isotail = ed;
#ifdef OHCI_VERBOSE_DEBUG
ohci_dump_periodic (ohci, "LINK_ISO");
#endif
break;
}
return 0;
/* the HC may not see the schedule updates yet, but if it does
* then they'll be properly ordered.
*/
}
/*-------------------------------------------------------------------------*/
......@@ -288,9 +290,8 @@ static void periodic_unlink (
* just the link to the ed is unlinked.
* the link from the ed still points to another operational ed or 0
* so the HC can eventually finish the processing of the unlinked ed
* caller guarantees the ED has no active TDs.
*/
static int start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
int i;
......@@ -361,15 +362,14 @@ static int start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
break;
}
/* FIXME ED's "unlink" state is indeterminate;
* the HC might still be caching it (till SOF).
* - use ed_rm_list and finish_unlinks(), adding some state that
* prevents clobbering hw linkage before the appropriate SOF
* - a speedup: when only one urb is queued on the ed, save 1msec
* by making start_urb_unlink() use this routine to deschedule.
/* FIXME Except for a couple of exceptionally clean unlink cases
* (like unlinking the only c/b ED, with no TDs) HCs may still be
* caching this (till SOF).
*
* To avoid racing with the hardware, this needs to use ED_UNLINK
* and delay til next INTR_SF. Merge with start_urb_unlink().
*/
ed->state = ED_UNLINK;
return 0;
ed->state = ED_IDLE;
}
......@@ -403,35 +403,27 @@ static struct ed *ed_get (
spin_lock_irqsave (&ohci->lock, flags);
if (!(ed = dev->ep [ep])) {
struct td *td;
ed = ed_alloc (ohci, SLAB_ATOMIC);
if (!ed) {
/* out of memory */
goto done;
}
dev->ep [ep] = ed;
}
if (ed->state & ED_URB_DEL) {
/* pending unlink request */
ed = 0;
goto done;
}
if (ed->state == ED_NEW) {
struct td *td;
ed->hwINFO = ED_SKIP;
/* dummy td; end of td list for ed */
td = td_alloc (ohci, SLAB_ATOMIC);
if (!td) {
/* out of memory */
ed_free (ohci, ed);
ed = 0;
goto done;
}
ed->dummy = td;
ed->hwTailP = cpu_to_le32 (td->td_dma);
ed->hwHeadP = ed->hwTailP; /* ED_C, ED_H zeroed */
ed->state = ED_UNLINK;
ed->state = ED_IDLE;
ed->type = type;
}
......@@ -439,7 +431,7 @@ static struct ed *ed_get (
* state/mode info. Currently the upper layers don't support such
* guarantees; we're lucky changing config/altsetting is rare.
*/
if (ed->state == ED_UNLINK) {
if (ed->state == ED_IDLE) {
u32 info;
info = usb_pipedevice (pipe);
......@@ -494,30 +486,13 @@ static struct ed *ed_get (
/*-------------------------------------------------------------------------*/
/* request unlinking of an endpoint from an operational HC.
* put the ep on the rm_list and stop the bulk or ctrl list
* put the ep on the rm_list
* real work is done at the next start frame (SF) hardware interrupt
*/
static void start_urb_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
/* already pending? */
if (ed->state & ED_URB_DEL)
return;
ed->state |= ED_URB_DEL;
ed->hwINFO |= ED_SKIP;
switch (ed->type) {
case PIPE_CONTROL: /* stop control list */
ohci->hc_control &= ~OHCI_CTRL_CLE;
writel (ohci->hc_control,
&ohci->regs->control);
break;
case PIPE_BULK: /* stop bulk list */
ohci->hc_control &= ~OHCI_CTRL_BLE;
writel (ohci->hc_control,
&ohci->regs->control);
break;
}
ed_deschedule (ohci, ed);
ed->state = ED_UNLINK;
/* SF interrupt might get delayed; record the frame counter value that
* indicates when the HC isn't looking at it, so concurrent unlinks
......@@ -526,7 +501,7 @@ static void start_urb_unlink (struct ohci_hcd *ohci, struct ed *ed)
*/
ed->tick = le16_to_cpu (ohci->hcca->frame_no) + 1;
ed->ed_rm_list = ohci->ed_rm_list;
ed->ed_next = ohci->ed_rm_list;
ohci->ed_rm_list = ed;
/* enable SOF interrupt */
......@@ -744,13 +719,15 @@ static void td_done (struct urb *urb, struct td *td)
u32 tdINFO = le32_to_cpup (&td->hwINFO);
int cc = 0;
/* ISO ... drivers see per-TD length/status */
if (tdINFO & TD_ISO) {
u16 tdPSW = le16_to_cpu (td->hwPSW [0]);
int dlen = 0;
cc = (tdPSW >> 12) & 0xF;
if (cc >= 0x0E) /* hc didn't touch? */
return;
if (usb_pipeout (urb->pipe))
dlen = urb->iso_frame_desc [td->index].length;
else
......@@ -759,9 +736,11 @@ static void td_done (struct urb *urb, struct td *td)
urb->iso_frame_desc [td->index].actual_length = dlen;
urb->iso_frame_desc [td->index].status = cc_to_error [cc];
if (cc != 0)
#ifdef VERBOSE_DEBUG
if (cc != TD_CC_NOERROR)
dbg (" urb %p iso TD %p (%d) len %d CC %d",
urb, td, 1 + td->index, dlen, cc);
#endif
/* BULK, INT, CONTROL ... drivers see aggregate length/status,
* except that "setup" bytes aren't counted and "short" transfers
......@@ -783,7 +762,7 @@ static void td_done (struct urb *urb, struct td *td)
if (cc == TD_DATAUNDERRUN
&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
cc = TD_CC_NOERROR;
if (cc != TD_CC_NOERROR) {
if (cc != TD_CC_NOERROR && cc < 0x0E) {
spin_lock (&urb->lock);
if (urb->status == -EINPROGRESS)
urb->status = cc_to_error [cc];
......@@ -801,7 +780,7 @@ static void td_done (struct urb *urb, struct td *td)
}
#ifdef VERBOSE_DEBUG
if (cc != 0)
if (cc != TD_CC_NOERROR && cc < 0x0E)
dbg (" urb %p TD %p (%d) CC %d, len=%d/%d",
urb, td, 1 + td->index, cc,
urb->actual_length,
......@@ -876,28 +855,39 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
static void finish_unlinks (struct ohci_hcd *ohci, u16 tick)
{
struct ed *ed, **last;
int ctrl = 0, bulk = 0;
rescan_all:
for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
struct td *td, *td_next, *tdHeadP, *tdTailP;
u32 *td_p;
int unlinked;
int completed, modified;
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps. completion callbacks might prepend
* EDs to the list, they'll be checked next irq.
* frame counter wraps.
*/
if (tick_before (tick, ed->tick)) {
last = &ed->ed_rm_list;
if (tick_before (tick, ed->tick) && !ohci->disabled) {
last = &ed->ed_next;
continue;
}
*last = ed->ed_rm_list;
ed->ed_rm_list = 0;
unlinked = 0;
/* unlink urbs from first one requested to queue end;
* leave earlier urbs alone
/* reentrancy: if we drop the schedule lock, someone might
* have modified this list. normally it's just prepending
* entries (which we'd ignore), but paranoia won't hurt.
*/
*last = ed->ed_next;
ed->ed_next = 0;
modified = 0;
/* unlink urbs as requested, but rescan the list after
* we call a completion since it might have unlinked
* another (earlier) urb
*
* FIXME use td_list to scan, not ed hashtables.
* completely abolish ed hashtables!
*/
rescan_this:
completed = 0;
tdTailP = dma_to_td (ohci, le32_to_cpup (&ed->hwTailP));
tdHeadP = dma_to_td (ohci, le32_to_cpup (&ed->hwHeadP));
td_p = &ed->hwHeadP;
......@@ -908,21 +898,18 @@ static void finish_unlinks (struct ohci_hcd *ohci, u16 tick)
td_next = dma_to_td (ohci,
le32_to_cpup (&td->hwNextTD));
if (unlinked || (urb_priv->state == URB_DEL)) {
u32 tdINFO = le32_to_cpup (&td->hwINFO);
unlinked = 1;
if (urb_priv->state == URB_DEL) {
/* HC may have partly processed this TD */
if (TD_CC_GET (tdINFO) < 0xE)
td_done (urb, td);
td_done (urb, td);
urb_priv->td_cnt++;
*td_p = td->hwNextTD | (*td_p
& __constant_cpu_to_le32 (0x3));
/* URB is done; clean up */
if (++ (urb_priv->td_cnt) == urb_priv->length) {
if (urb->status == -EINPROGRESS)
urb->status = -ECONNRESET;
if (urb_priv->td_cnt == urb_priv->length) {
modified = completed = 1;
spin_unlock (&ohci->lock);
finish_urb (ohci, urb);
spin_lock (&ohci->lock);
......@@ -932,49 +919,52 @@ static void finish_unlinks (struct ohci_hcd *ohci, u16 tick)
}
}
/* FIXME actually want four cases here:
* (a) finishing URB unlink
* [a1] no URBs queued, so start ED unlink
* [a2] some (earlier) URBs still linked, re-enable
* (b) finishing ED unlink
* [b1] no URBs queued, ED is truly idle now
* ... we could set state ED_NEW and free dummy
* [b2] URBs now queued, link ED back into schedule
* right now we only have (a)
*/
ed->state &= ~ED_URB_DEL;
tdHeadP = dma_to_td (ohci, le32_to_cpup (&ed->hwHeadP));
if (tdHeadP == tdTailP) {
if (ed->state == ED_OPER)
start_ed_unlink (ohci, ed);
} else
ed->hwINFO &= ~ED_SKIP;
/* ED's now officially unlinked, hc doesn't see */
ed->state = ED_IDLE;
ed->hwINFO &= ~ED_SKIP;
ed->hwHeadP &= ~cpu_to_le32 (ED_H);
ed->hwNextED = 0;
switch (ed->type) {
case PIPE_CONTROL:
ctrl = 1;
break;
case PIPE_BULK:
bulk = 1;
break;
/* but if there's work queued, reschedule */
tdHeadP = dma_to_td (ohci, le32_to_cpup (&ed->hwHeadP));
if (tdHeadP != tdTailP) {
if (completed)
goto rescan_this;
if (!ohci->disabled && !ohci->sleeping)
ed_schedule (ohci, ed);
}
if (modified)
goto rescan_all;
}
/* maybe reenable control and bulk lists */
if (!ohci->disabled) {
if (ctrl) /* reset control list */
writel (0, &ohci->regs->ed_controlcurrent);
if (bulk) /* reset bulk list */
writel (0, &ohci->regs->ed_bulkcurrent);
if (!ohci->ed_rm_list) {
if (ohci->ed_controltail)
ohci->hc_control |= OHCI_CTRL_CLE;
if (ohci->ed_bulktail)
ohci->hc_control |= OHCI_CTRL_BLE;
writel (ohci->hc_control, &ohci->regs->control);
if (!ohci->disabled && !ohci->ed_rm_list) {
u32 command = 0, control = 0;
if (ohci->ed_controltail) {
command |= OHCI_CLF;
if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
control |= OHCI_CTRL_CLE;
writel (0, &ohci->regs->ed_controlcurrent);
}
}
}
if (ohci->ed_bulktail) {
command |= OHCI_BLF;
if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
control |= OHCI_CTRL_BLE;
writel (0, &ohci->regs->ed_bulkcurrent);
}
}
/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
if (control) {
ohci->hc_control |= control;
writel (ohci->hc_control, &ohci->regs->control);
}
if (command)
writel (command, &ohci->regs->cmdstatus);
}
}
......@@ -1026,7 +1016,7 @@ static void dl_done_list (struct ohci_hcd *ohci, struct td *td)
if ((ed->hwHeadP & __constant_cpu_to_le32 (TD_MASK))
== ed->hwTailP
&& (ed->state == ED_OPER))
start_ed_unlink (ohci, ed);
ed_deschedule (ohci, ed);
td = td_next;
}
spin_unlock_irqrestore (&ohci->lock, flags);
......
......@@ -31,15 +31,21 @@ struct ed {
/* rest are purely for the driver's use */
dma_addr_t dma; /* addr of ED */
struct td *dummy; /* next TD to activate */
/* host's view of schedule */
struct ed *ed_next; /* on schedule or rm_list */
struct ed *ed_prev; /* for non-interrupt EDs */
struct td *dummy;
struct list_head td_list; /* "shadow list" of our TDs */
u8 state; /* ED_{NEW,UNLINK,OPER} */
#define ED_NEW 0x00 /* unused, no dummy td */
#define ED_UNLINK 0x01 /* dummy td, maybe linked to hc */
#define ED_OPER 0x02 /* dummy td, _is_ linked to hc */
#define ED_URB_DEL 0x08 /* for unlinking; masked in */
/* create --> IDLE --> OPER --> ... --> IDLE --> destroy
* usually: OPER --> UNLINK --> (IDLE | OPER) --> ...
* some special cases : OPER --> IDLE ...
*/
u8 state; /* ED_{IDLE,UNLINK,OPER} */
#define ED_IDLE 0x00 /* NOT linked to HC */
#define ED_UNLINK 0x01 /* being unlinked from hc */
#define ED_OPER 0x02 /* IS linked to hc */
u8 type; /* PIPE_{BULK,...} */
u16 interval; /* interrupt, isochronous */
......@@ -53,7 +59,6 @@ struct ed {
/* HC may see EDs on rm_list until next frame (frame_no == tick) */
u16 tick;
struct ed *ed_rm_list;
} __attribute__ ((aligned(16)));
#define ED_MASK ((u32)~0x0f) /* strip hw status in low addr bits */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment