Commit 0f9becac authored by David Brownell, committed by Greg Kroah-Hartman

[PATCH] ehci does interrupt queuing

This patch makes EHCI:

  - Share the same TD queueing code for control, bulk,
    and interrupt traffic;
  - Queue interrupt transfers, modifying the code for
    urb submit/unlink/complete;
  - Become thinner, by removing lots of nasty special-case
    logic for interrupt transfers (size limits, no queueing, etc.);
  - Grow some "automagic resubmit" logic, ready to be
    ripped out soonish (see the sketch just after this list);
  - Package its interrupt scheduling so it can be called
    from more places.
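Once the "automagic resubmit" logic is ripped out, drivers will requeue
interrupt urbs from their own completion handlers, as the "device drivers
will soon be doing something like this" comment in ehci_urb_done() hints.
A minimal sketch of such a handler (hypothetical driver code, not part of
this patch; SLAB_ATOMIC is assumed because completions run in interrupt
context):

	/* hypothetical driver completion handler, not part of this patch */
	static void my_intr_complete (struct urb *urb)
	{
		int	status;

		/* an unlinked urb must not be resubmitted */
		if (urb->status == -ENOENT || urb->status == -ECONNRESET)
			return;

		/* ... process urb->transfer_buffer contents here ... */

		/* requeue, keeping the polling schedule going */
		status = usb_submit_urb (urb, SLAB_ATOMIC);
		if (status != 0)
			err ("can't resubmit urb %p, status %d",
					urb, status);
	}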
parent b2ade7ec
@@ -65,6 +65,8 @@
  *
  * HISTORY:
  *
+ * 2002-08-06	Handling for bulk and interrupt transfers is mostly shared;
+ *	only scheduling is different, no arbitrary limitations.
  * 2002-07-25	Sanity check PCI reads, mostly for better cardbus support,
  *	clean up HC run state handshaking.
  * 2002-05-24	Preliminary FS/LS interrupts, using scheduling shortcuts
@@ -85,7 +87,7 @@
  * 2001-June	Works with usb-storage and NEC EHCI on 2.4
  */

-#define DRIVER_VERSION "2002-Jul-25"
+#define DRIVER_VERSION "2002-Aug-06"
 #define DRIVER_AUTHOR "David Brownell"
 #define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
@@ -93,6 +95,8 @@
 // #define EHCI_VERBOSE_DEBUG
 // #define have_split_iso

+#define INTR_AUTOMAGIC		/* to be removed later in 2.5 */
+
 /* magic numbers that can affect system performance */
 #define	EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
 #define	EHCI_TUNE_RL_HS		0	/* nak throttle; see 4.9 */
@@ -618,7 +622,8 @@ static void ehci_irq (struct usb_hcd *hcd)
  *
  * hcd-specific init for hcpriv hasn't been done yet
  *
- * NOTE:  EHCI queues control and bulk requests transparently, like OHCI.
+ * NOTE:  control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
  */
 static int ehci_urb_enqueue (
 	struct usb_hcd	*hcd,
@@ -694,17 +699,35 @@ static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
 		if (qh->qh_state == QH_STATE_LINKED)
 			start_unlink_async (ehci, qh);
 		spin_unlock_irqrestore (&ehci->lock, flags);
-		return 0;
+		break;

 	case PIPE_INTERRUPT:
-		intr_deschedule (ehci, urb->start_frame, qh,
-			(urb->dev->speed == USB_SPEED_HIGH)
-				? urb->interval
-				: (urb->interval << 3));
-		if (ehci->hcd.state == USB_STATE_HALT)
-			urb->status = -ESHUTDOWN;
-		qh_completions (ehci, qh, 1);
-		return 0;
+		if (qh->qh_state == QH_STATE_LINKED) {
+			/* messy, can spin or block a microframe ... */
+			intr_deschedule (ehci, qh, 1);
+			/* qh_state == IDLE */
+		}
+		qh_completions (ehci, qh);
+
+		/* reschedule QH iff another request is queued */
+		if (!list_empty (&qh->qtd_list)
+				&& HCD_IS_RUNNING (ehci->hcd.state)) {
+			int status;
+
+			spin_lock_irqsave (&ehci->lock, flags);
+			status = qh_schedule (ehci, qh);
+			spin_unlock_irqrestore (&ehci->lock, flags);
+
+			if (status != 0) {
+				// shouldn't happen often, but ...
+				// FIXME kill those tds' urbs
+				err ("can't reschedule qh %p, err %d",
+					qh, status);
+			}
+			return status;
+		}
+		break;

 	case PIPE_ISOCHRONOUS:
 		// itd or sitd ...
...
@@ -159,30 +159,6 @@ static inline void qtd_copy_status (struct urb *urb, size_t length, u32 token)
 	}
 }

-static void ehci_urb_complete (
-	struct ehci_hcd		*ehci,
-	dma_addr_t		addr,
-	struct urb		*urb
-) {
-	if (urb->transfer_buffer_length && usb_pipein (urb->pipe))
-		pci_dma_sync_single (ehci->hcd.pdev, addr,
-			urb->transfer_buffer_length,
-			PCI_DMA_FROMDEVICE);
-
-	/* cleanse status if we saw no error */
-	if (likely (urb->status == -EINPROGRESS)) {
-		if (urb->actual_length != urb->transfer_buffer_length
-				&& (urb->transfer_flags & URB_SHORT_NOT_OK))
-			urb->status = -EREMOTEIO;
-		else
-			urb->status = 0;
-	}
-
-	/* only report unlinks once */
-	if (likely (urb->status != -ENOENT && urb->status != -ENOTCONN))
-		urb->complete (urb);
-}
-
 /* urb->lock ignored from here on (hcd is done with urb) */

 static void ehci_urb_done (
@@ -190,6 +166,11 @@ static void ehci_urb_done (
 	dma_addr_t	addr,
 	struct urb	*urb
 ) {
+#ifdef	INTR_AUTOMAGIC
+	struct urb		*resubmit = 0;
+	struct usb_device	*dev = 0;
+#endif
+
 	if (urb->transfer_buffer_length)
 		pci_unmap_single (ehci->hcd.pdev,
 			addr,
@@ -198,7 +179,23 @@ static void ehci_urb_done (
 			? PCI_DMA_FROMDEVICE
 			: PCI_DMA_TODEVICE);
 	if (likely (urb->hcpriv != 0)) {
-		qh_put (ehci, (struct ehci_qh *) urb->hcpriv);
+		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;
+
+		/* S-mask in a QH means it's an interrupt urb */
+		if ((qh->hw_info2 & cpu_to_le32 (0x00ff)) != 0) {
+
+			/* ... update hc-wide periodic stats (for usbfs) */
+			ehci->hcd.self.bandwidth_int_reqs--;
+
+#ifdef	INTR_AUTOMAGIC
+			if (!((urb->status == -ENOENT)
+					|| (urb->status == -ECONNRESET))) {
+				resubmit = usb_get_urb (urb);
+				dev = urb->dev;
+			}
+#endif
+		}
+
+		qh_put (ehci, qh);
 		urb->hcpriv = 0;
 	}
@@ -210,33 +207,46 @@ static void ehci_urb_done (
 		urb->status = 0;
 	}

+	/* hand off urb ownership */
 	usb_hcd_giveback_urb (&ehci->hcd, urb);
+
+#ifdef	INTR_AUTOMAGIC
+	if (resubmit && ((urb->status == -ENOENT)
+				|| (urb->status == -ECONNRESET))) {
+		usb_put_urb (resubmit);
+		resubmit = 0;
+	}
+	// device drivers will soon be doing something like this
+	if (resubmit) {
+		int	status;
+
+		resubmit->dev = dev;
+		status = usb_submit_urb (resubmit, SLAB_KERNEL);
+		if (status != 0)
+			err ("can't resubmit interrupt urb %p: status %d",
+					resubmit, status);
+		usb_put_urb (resubmit);
+	}
+#endif
 }

 /*
  * Process completed qtds for a qh, issuing completions if needed.
- * When freeing:  frees qtds, unmaps buf, returns URB to driver.
- * When not freeing (queued periodic qh):  retain qtds, mapping, and urb.
+ * Frees qtds, unmaps buf, returns URB to driver.
  * Races up to qh->hw_current; returns number of urb completions.
  */
-static int
-qh_completions (
-	struct ehci_hcd		*ehci,
-	struct ehci_qh		*qh,
-	int			freeing
-) {
+static void
+qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
 	struct ehci_qtd		*qtd, *last;
 	struct list_head	*next, *qtd_list = &qh->qtd_list;
 	int			unlink = 0, halted = 0;
 	unsigned long		flags;
-	int			retval = 0;

 	spin_lock_irqsave (&ehci->lock, flags);
 	if (unlikely (list_empty (qtd_list))) {
 		spin_unlock_irqrestore (&ehci->lock, flags);
-		return retval;
+		return;
 	}

 	/* scan QTDs till end of list, or we reach an active one */
@@ -253,14 +263,8 @@ qh_completions (
 		if (likely (last->urb != urb)) {
 			/* complete() can reenter this HCD */
 			spin_unlock_irqrestore (&ehci->lock, flags);
-			if (likely (freeing != 0))
-				ehci_urb_done (ehci, last->buf_dma,
-						last->urb);
-			else
-				ehci_urb_complete (ehci, last->buf_dma,
-						last->urb);
+			ehci_urb_done (ehci, last->buf_dma, last->urb);
 			spin_lock_irqsave (&ehci->lock, flags);
-			retval++;
 		}

 		/* qh overlays can have HC's old cached copies of
@@ -272,7 +276,6 @@ qh_completions (
 			qh->hw_qtd_next = last->hw_next;
 		}

-		if (likely (freeing != 0))
 			ehci_qtd_free (ehci, last);
 		last = 0;
 	}
@@ -290,7 +293,7 @@ qh_completions (
 		/* fault: unlink the rest, since this qtd saw an error? */
 		if (unlikely ((token & QTD_STS_HALT) != 0)) {
-			freeing = unlink = 1;
+			unlink = 1;
 			/* status copied below */

 	/* QH halts only because of fault (above) or unlink (here). */
@@ -298,13 +301,14 @@ qh_completions (
 		/* unlinking everything because of HC shutdown? */
 		if (ehci->hcd.state == USB_STATE_HALT) {
-			freeing = unlink = 1;
+			unlink = 1;

 		/* explicit unlink, maybe starting here? */
 		} else if (qh->qh_state == QH_STATE_IDLE
 				&& (urb->status == -ECONNRESET
+					|| urb->status == -ESHUTDOWN
 					|| urb->status == -ENOENT)) {
-			freeing = unlink = 1;
+			unlink = 1;

 		/* QH halted to unlink urbs _after_ this? */
 		} else if (!unlink && (token & QTD_STS_ACTIVE) != 0) {
@@ -332,31 +336,7 @@ qh_completions (
 		qtd_copy_status (urb, qtd->length, token);
 		spin_unlock (&urb->lock);

-		/*
-		 * NOTE:  this won't work right with interrupt urbs that
-		 * need multiple qtds ... only the first scan of qh->qtd_list
-		 * starts at the right qtd, yet multiple scans could happen
-		 * for transfers that are scheduled across multiple uframes.
-		 * (Such schedules are not currently allowed!)
-		 */
-		if (likely (freeing != 0))
 			list_del (&qtd->qtd_list);
-		else {
-			/* restore everything the HC could change
-			 * from an interrupt QTD
-			 */
-			qtd->hw_token = (qtd->hw_token
-					& __constant_cpu_to_le32 (0x8300))
-				| cpu_to_le32 (qtd->length << 16)
-				| __constant_cpu_to_le32 (QTD_STS_ACTIVE
-					| (EHCI_TUNE_CERR << 10));
-			qtd->hw_buf [0] &= ~__constant_cpu_to_le32 (0x0fff);
-
-			/* this offset, and the length above,
-			 * are likely wrong on QTDs #2..N
-			 */
-			qtd->hw_buf [0] |= cpu_to_le32 (0x0fff & qtd->buf_dma);
-		}

 #if 0
 		if (urb->status == -EINPROGRESS)
@@ -384,14 +364,9 @@ qh_completions (
 	/* last urb's completion might still need calling */
 	if (likely (last != 0)) {
-		if (likely (freeing != 0)) {
 			ehci_urb_done (ehci, last->buf_dma, last->urb);
 			ehci_qtd_free (ehci, last);
-		} else
-			ehci_urb_complete (ehci, last->buf_dma, last->urb);
-		retval++;
 	}
-	return retval;
 }

 /*-------------------------------------------------------------------------*/
@@ -659,7 +634,7 @@ ehci_qh_make (
 	if (type == PIPE_INTERRUPT) {
 		qh->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
 				hb_mult (maxp) * max_packet (maxp));
-		qh->start = ~0;
+		qh->start = NO_FRAME;

 		if (urb->dev->speed == USB_SPEED_HIGH) {
 			qh->c_usecs = 0;
@@ -742,8 +717,12 @@ ehci_qh_make (
 	qh->hw_info2 = cpu_to_le32 (info2);

 	/* initialize sw and hw queues with these qtds */
+	if (!list_empty (qtd_list)) {
 		list_splice (qtd_list, &qh->qtd_list);
 		qh_update (qh, list_entry (qtd_list->next, struct ehci_qtd, qtd_list));
+	} else {
+		qh->hw_qtd_next = qh->hw_alt_next = EHCI_LIST_END;
+	}

 	/* initialize data toggle state */
 	clear_toggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, qh);
@@ -813,25 +792,29 @@ static struct ehci_qh *qh_append_tds (
 	qh = (struct ehci_qh *) *ptr;
 	if (likely (qh != 0)) {
 		struct ehci_qtd	*qtd;
-		u32		hw_next;

-		qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
-		hw_next = QTD_NEXT (qtd->qtd_dma);
+		if (unlikely (list_empty (qtd_list)))
+			qtd = 0;
+		else
+			qtd = list_entry (qtd_list->next, struct ehci_qtd,
+					qtd_list);

 		/* maybe patch the qh used for set_address */
 		if (unlikely (epnum == 0
 				&& le32_to_cpu (qh->hw_info1 & 0x7f) == 0))
 			qh->hw_info1 |= cpu_to_le32 (usb_pipedevice(urb->pipe));

-		/* is an URB is queued to this qh already? */
-		if (unlikely (!list_empty (&qh->qtd_list))) {
+		/* append to tds already queued to this qh? */
+		if (unlikely (!list_empty (&qh->qtd_list) && qtd)) {
 			struct ehci_qtd		*last_qtd;
 			int			short_rx = 0;
+			u32			hw_next;

 			/* update the last qtd's "next" pointer */
 			// dbg_qh ("non-empty qh", ehci, qh);
 			last_qtd = list_entry (qh->qtd_list.prev,
 					struct ehci_qtd, qtd_list);
+			hw_next = QTD_NEXT (qtd->qtd_dma);
 			last_qtd->hw_next = hw_next;

 			/* previous urb allows short rx? maybe optimize. */
@@ -872,6 +855,7 @@ static struct ehci_qh *qh_append_tds (
 				clear_toggle (urb->dev,
 					epnum & 0x0f, !(epnum & 0x10), qh);
 			}
+			if (qtd)
 				qh_update (qh, qtd);
 		}
 		list_splice (qtd_list, qh->qtd_list.prev);
@@ -946,7 +930,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
 	ehci->reclaim = 0;
 	ehci->reclaim_ready = 0;
-	qh_completions (ehci, qh, 1);
+	qh_completions (ehci, qh);

 	// unlink any urb should now unlink all following urbs, so that
 	// relinking only happens for urbs before the unlinked ones.
@@ -1046,7 +1030,7 @@ static void scan_async (struct ehci_hcd *ehci)
 			spin_unlock_irqrestore (&ehci->lock, flags);
 			/* concurrent unlink could happen here */
-			qh_completions (ehci, qh, 1);
+			qh_completions (ehci, qh);
 			spin_lock_irqsave (&ehci->lock, flags);
 			qh_put (ehci, qh);
...
@@ -220,23 +220,23 @@ static int disable_periodic (struct ehci_hcd *ehci)

 /*-------------------------------------------------------------------------*/

-// FIXME microframe periods not yet handled
-
 static void intr_deschedule (
 	struct ehci_hcd	*ehci,
-	unsigned	frame,
 	struct ehci_qh	*qh,
-	unsigned	period
+	int		wait
 ) {
 	unsigned long	flags;
 	int		status;
+	unsigned	frame = qh->start;

-	period >>= 3;
+	// FIXME microframe periods not handled yet

 	spin_lock_irqsave (&ehci->lock, flags);

 	do {
 		periodic_unlink (ehci, frame, qh);
 		qh_put (ehci, qh);
-		frame += period;
+		frame += qh->period;
 	} while (frame < ehci->periodic_size);

 	qh->qh_state = QH_STATE_UNLINK;
@@ -258,14 +258,28 @@ static void intr_deschedule (
 	 * (yeech!) to be sure it's done.
 	 * No other threads may be mucking with this qh.
 	 */
-	if (!status && ((ehci_get_frame (&ehci->hcd) - frame) % period) == 0)
+	if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
+		if (wait) {
 			udelay (125);
+			qh->hw_next = EHCI_LIST_END;
+		} else {
+			/* we may not be IDLE yet, but if the qh is empty
+			 * the race is very short.  then if qh also isn't
+			 * rescheduled soon, it won't matter.  otherwise...
+			 */
+			vdbg ("intr_deschedule...");
+		}
+	} else
+		qh->hw_next = EHCI_LIST_END;

 	qh->qh_state = QH_STATE_IDLE;
-	qh->hw_next = EHCI_LIST_END;
+
+	/* update per-qh bandwidth utilization (for usbfs) */
+	ehci->hcd.self.bandwidth_allocated -=
+		(qh->usecs + qh->c_usecs) / qh->period;

 	vdbg ("descheduled qh %p, per = %d frame = %d count = %d, urbs = %d",
-		qh, period, frame,
+		qh, qh->period, frame,
 		atomic_read (&qh->refcount), ehci->periodic_sched);
 }
@@ -309,157 +323,108 @@ static int check_period (
 	return 1;
 }

-static int intr_submit (
+static int check_intr_schedule (
 	struct ehci_hcd		*ehci,
-	struct urb		*urb,
-	struct list_head	*qtd_list,
-	int			mem_flags
-) {
-	unsigned		epnum;
-	unsigned long		flags;
-	struct ehci_qh		*qh;
-	struct hcd_dev		*dev;
-	int			is_input;
-	int			status = 0;
+	unsigned		frame,
+	unsigned		uframe,
+	const struct ehci_qh	*qh,
+	u32			*c_maskp
+)
+{
+	int		retval = -ENOSPC;

-	/* get endpoint and transfer/schedule data */
-	epnum = usb_pipeendpoint (urb->pipe);
-	is_input = usb_pipein (urb->pipe);
-	if (is_input)
-		epnum |= 0x10;
+	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
+		goto done;
+	if (!qh->c_usecs) {
+		retval = 0;
+		*c_maskp = cpu_to_le32 (0);
+		goto done;
+	}

-	/*
-	 * NOTE: current completion/restart logic doesn't handle more than
-	 * one qtd in a periodic qh ... 16-20 KB/urb is pretty big for this.
-	 * such big requests need many periods to transfer.
+	/* This is a split transaction; check the bandwidth available for
+	 * the completion too.  Check both worst and best case gaps: worst
+	 * case is SPLIT near uframe end, and CSPLIT near start ... best is
+	 * vice versa.  Difference can be almost two uframe times, but we
+	 * reserve unnecessary bandwidth (waste it) this way.  (Actually
+	 * even better cases exist, like immediate device NAK.)
 	 *
-	 * FIXME want to change hcd core submit model to expect queuing
-	 * for all transfer types ... not just ISO and (with flag) BULK.
-	 * that means: getting rid of this check; handling the "interrupt
-	 * urb already queued" case below like bulk queuing is handled (no
-	 * errors possible!); and completly getting rid of that annoying
-	 * qh restart logic.  simpler/smaller overall, and more flexible.
+	 * FIXME don't even bother unless we know this TT is idle in that
+	 * range of uframes ... for now, check_period() allows only one
+	 * interrupt transfer per frame, so needn't check "TT busy" status
+	 * when scheduling a split (QH, SITD, or FSTN).
+	 *
+	 * FIXME ehci 0.96 and above can use FSTNs
 	 */
-	if (unlikely (qtd_list->next != qtd_list->prev)) {
-		dbg ("only one intr qtd per urb allowed");
-		status = -EINVAL;
+	if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
+				qh->period, qh->c_usecs))
+		goto done;
+	if (!check_period (ehci, frame, uframe + qh->gap_uf,
+				qh->period, qh->c_usecs))
 		goto done;
-	}
-
-	spin_lock_irqsave (&ehci->lock, flags);

-	/* get the qh (must be empty and idle) */
-	dev = (struct hcd_dev *)urb->dev->hcpriv;
-	qh = (struct ehci_qh *) dev->ep [epnum];
-	if (qh) {
-		/* only allow one queued interrupt urb per EP */
-		if (unlikely (qh->qh_state != QH_STATE_IDLE
-				|| !list_empty (&qh->qtd_list))) {
-			dbg ("interrupt urb already queued");
-			status = -EBUSY;
-		} else {
-			/* maybe reset hardware's data toggle in the qh */
-			if (unlikely (!usb_gettoggle (urb->dev, epnum & 0x0f,
-					!(epnum & 0x10)))) {
-				qh->hw_token |=
-					__constant_cpu_to_le32 (QTD_TOGGLE);
-				usb_settoggle (urb->dev, epnum & 0x0f,
-					!(epnum & 0x10), 1);
-			}
-
-			/* trust the QH was set up as interrupt ... */
-			list_splice (qtd_list, &qh->qtd_list);
-			qh_update (qh, list_entry (qtd_list->next,
-					struct ehci_qtd, qtd_list));
-			qtd_list = &qh->qtd_list;
-		}
-	} else {
-		/* can't sleep here, we have ehci->lock... */
-		qh = ehci_qh_make (ehci, urb, qtd_list, SLAB_ATOMIC);
-		if (likely (qh != 0)) {
-			// dbg ("new INTR qh %p", qh);
-			dev->ep [epnum] = qh;
-			qtd_list = &qh->qtd_list;
-		} else
-			status = -ENOMEM;
-	}
+	*c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf));
+	retval = 0;
+done:
+	return retval;
+}

-	/* Schedule this periodic QH. */
-	if (likely (status == 0)) {
-		unsigned	frame = qh->period;
+static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	int		status;
+	unsigned	uframe;
+	u32		c_mask;
+	unsigned	frame;	/* 0..(qh->period - 1), or NO_FRAME */

 	qh->hw_next = EHCI_LIST_END;
+	frame = qh->start;

-		urb->hcpriv = qh_get (qh);
+	/* reuse the previous schedule slots, if we can */
+	if (frame < qh->period) {
+		uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
+		status = check_intr_schedule (ehci, frame, --uframe,
+				qh, &c_mask);
+	} else {
+		uframe = 0;
+		c_mask = 0;
 		status = -ENOSPC;
+	}

-		/* pick a set of schedule slots, link the QH into them */
-		do {
-			unsigned	uframe;
-			u32		c_mask = 0;
-
-			/* pick a set of slots such that all uframes have
-			 * enough periodic bandwidth available.
-			 */
-			frame--;
+	/* else scan the schedule to find a group of slots such that all
+	 * uframes have enough periodic bandwidth available.
+	 */
+	if (status) {
+		frame = qh->period - 1;
+		do {
 			for (uframe = 0; uframe < 8; uframe++) {
-				if (check_period (ehci, frame, uframe,
-						qh->period, qh->usecs) == 0)
-					continue;
-
-				/* If this is a split transaction, check the
-				 * bandwidth available for the completion
-				 * too.  check both best and worst case gaps:
-				 * worst case is SPLIT near uframe end, and
-				 * CSPLIT near start ... best is vice versa.
-				 * Difference can be almost two uframe times.
-				 *
-				 * FIXME don't even bother unless we know
-				 * this TT is idle in that uframe ... right
-				 * now we know only one interrupt transfer
-				 * will be scheduled per frame, so we don't
-				 * need to update/check TT state when we
-				 * schedule a split (QH, SITD, or FSTN).
-				 *
-				 * FIXME ehci 0.96 and above can use FSTNs
-				 */
-				if (!qh->c_usecs)
-					break;
-				if (check_period (ehci, frame,
-						uframe + qh->gap_uf,
-						qh->period, qh->c_usecs) == 0)
-					continue;
-				if (check_period (ehci, frame,
-						uframe + qh->gap_uf + 1,
-						qh->period, qh->c_usecs) == 0)
-					continue;
-
-				c_mask = 0x03 << (8 + uframe + qh->gap_uf);
-				c_mask = cpu_to_le32 (c_mask);
+				status = check_intr_schedule (ehci,
+						frame, uframe, qh,
						&c_mask);
+				if (status == 0)
 					break;
 			}
-			if (uframe == 8)
-				continue;
-
-			/* QH will run once each period, starting there */
-			urb->start_frame = qh->start = frame;
-			status = 0;
+		} while (status && --frame);
+		if (status)
+			goto done;
+		qh->start = frame;

 		/* reset S-frame and (maybe) C-frame masks */
 		qh->hw_info2 &= ~0xffff;
 		qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
-			// dbg_qh ("Schedule INTR qh", ehci, qh);
+	} else
+		dbg ("reused previous qh %p schedule", qh);

 	/* stuff into the periodic schedule */
 	qh->qh_state = QH_STATE_LINKED;
-		dbg ("qh %p usecs %d/%d period %d.0 "
-				"starting %d.%d (gap %d)",
-			qh, qh->usecs, qh->c_usecs, qh->period,
-			frame, uframe, qh->gap_uf);
+	dbg ("qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
+		qh, qh->usecs, qh->c_usecs,
+		qh->period, frame, uframe, qh->gap_uf);
 	do {
 		if (unlikely (ehci->pshadow [frame].ptr != 0)) {
 // FIXME -- just link toward the end, before any qh with a shorter period,
-// AND handle it already being (implicitly) linked into this frame
-// AS WELL AS updating the check_period() logic
+// AND accomodate it already having been linked here (after some other qh)
+// AS WELL AS updating the schedule checking logic
 			BUG ();
 		} else {
 			ehci->pshadow [frame].qh = qh_get (qh);
@@ -470,19 +435,61 @@ static int intr_submit (
 		frame += qh->period;
 	} while (frame < ehci->periodic_size);

-		/* update bandwidth utilization records (for usbfs) */
-		usb_claim_bandwidth (urb->dev, urb,
-			(qh->usecs + qh->c_usecs) / qh->period, 0);
+	/* update per-qh bandwidth for usbfs */
+	ehci->hcd.self.bandwidth_allocated +=
+		(qh->usecs + qh->c_usecs) / qh->period;

 	/* maybe enable periodic schedule processing */
 	if (!ehci->periodic_sched++)
 		status = enable_periodic (ehci);
-			break;
-
-		} while (frame);
-	}
-	spin_unlock_irqrestore (&ehci->lock, flags);
+done:
+	return status;
+}
+
+static int intr_submit (
+	struct ehci_hcd		*ehci,
+	struct urb		*urb,
+	struct list_head	*qtd_list,
+	int			mem_flags
+) {
+	unsigned		epnum;
+	unsigned long		flags;
+	struct ehci_qh		*qh;
+	struct hcd_dev		*dev;
+	int			is_input;
+	int			status = 0;
+	struct list_head	empty;
+
+	/* get endpoint and transfer/schedule data */
+	epnum = usb_pipeendpoint (urb->pipe);
+	is_input = usb_pipein (urb->pipe);
+	if (is_input)
+		epnum |= 0x10;
+
+	spin_lock_irqsave (&ehci->lock, flags);
+	dev = (struct hcd_dev *)urb->dev->hcpriv;
+
+	/* get qh and force any scheduling errors */
+	INIT_LIST_HEAD (&empty);
+	qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
+	if (qh == 0) {
+		status = -ENOMEM;
+		goto done;
+	}
+	if (qh->qh_state == QH_STATE_IDLE) {
+		if ((status = qh_schedule (ehci, qh)) != 0)
+			goto done;
+	}
+
+	/* then queue the urb's tds to the qh */
+	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
+	BUG_ON (qh == 0);
+
+	/* ... update usbfs periodic stats */
+	ehci->hcd.self.bandwidth_int_reqs++;
+
 done:
+	spin_unlock_irqrestore (&ehci->lock, flags);
 	if (status)
 		qtd_list_free (ehci, urb, qtd_list);
@@ -496,10 +503,6 @@ intr_complete (
 	struct ehci_qh	*qh,
 	unsigned long	flags		/* caller owns ehci->lock ... */
 ) {
-	struct ehci_qtd	*qtd;
-	struct urb	*urb;
-	int		unlinking;
-
 	/* nothing to report? */
 	if (likely ((qh->hw_token & __constant_cpu_to_le32 (QTD_STS_ACTIVE))
 			!= 0))
@@ -509,43 +512,14 @@ intr_complete (
 		return flags;
 	}

-	qtd = list_entry (qh->qtd_list.next, struct ehci_qtd, qtd_list);
-	urb = qtd->urb;
-	unlinking = (urb->status == -ENOENT) || (urb->status == -ECONNRESET);
-
-	/* call any completions, after patching for reactivation */
+	/* handle any completions */
 	spin_unlock_irqrestore (&ehci->lock, flags);
-	/* NOTE:  currently restricted to one qtd per qh! */
-	if (qh_completions (ehci, qh, 0) == 0)
-		urb = 0;
+	qh_completions (ehci, qh);
 	spin_lock_irqsave (&ehci->lock, flags);

-	/* never reactivate requests that were unlinked ... */
-	if (likely (urb != 0)) {
-		if (unlinking
-				|| urb->status == -ECONNRESET
-				|| urb->status == -ENOENT
-				// || (urb->dev == null)
-				|| ehci->hcd.state == USB_STATE_HALT)
-			urb = 0;
-		// FIXME look at all those unlink cases ... we always
-		// need exactly one completion that reports unlink.
-		// the one above might not have been it!
-	}
-
-	/* normally reactivate */
-	if (likely (urb != 0)) {
-		if (usb_pipeout (urb->pipe))
-			pci_dma_sync_single (ehci->hcd.pdev,
-				qtd->buf_dma,
-				urb->transfer_buffer_length,
-				PCI_DMA_TODEVICE);
-		urb->status = -EINPROGRESS;
-		urb->actual_length = 0;
-
-		/* patch qh and restart */
-		qh_update (qh, qtd);
-	}
+	if (unlikely (list_empty (&qh->qtd_list)))
+		intr_deschedule (ehci, qh, 0);

 	return flags;
 }
...
@@ -299,6 +299,7 @@ struct ehci_qh {
 	u8		c_usecs;	/* ... split completion bw */
 	unsigned short	period;		/* polling interval */
 	unsigned short	start;		/* where polling starts */
+#define NO_FRAME ((unsigned short)~0)	/* pick new start */
 } __attribute__ ((aligned (32)));
...