Commit 39c5c63d authored by David Brownell, committed by Greg Kroah-Hartman

[PATCH] ehci updates

This patch is the first part of fixing the EHCI driver to queue
interrupt transactions, handle larger requests, and basically treat
interrupt as just "bulk that lives on the periodic schedule".  One
more patch should wrap that up.

     qh processing cleanup
	- split "append tds to qh" logic out of "put on async schedule",
	  so it can be used with "put on periodic schedule" too
     interrupt transfer cleanup
	- save rest of scheduling params in the qh
	- calculate scheduling params only once
     other cleanup
	- use new container_of() (see the sketch just after this message)
	- minor code shrinkage (avoid pipe bitops, conditionals, etc)
	- rename variable (will track endpoints, not urbs)
	- free_config() logic
parent 4344c76c
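
The "use new container_of()" item refers to the ehci.h hunk below, where hcd_to_ehci() switches from list_entry() to container_of(). As a hedged illustration only (the struct members here are abbreviated stand-ins, not the real driver types), the macro recovers a containing structure from a pointer to one of its embedded members; list_entry() expands to the same pointer arithmetic, the new name simply states the intent when no list is involved:

    /* Sketch only: what the hcd_to_ehci() change relies on.  The kernel's
     * container_of() adds type checking on top of this minimal form, and
     * the structs here are abbreviated, not the full driver structs.
     */
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct usb_hcd { int state; };                      /* abbreviated */
    struct ehci_hcd { int next_uframe; struct usb_hcd hcd; };

    #define hcd_to_ehci(hcd_ptr) container_of(hcd_ptr, struct ehci_hcd, hcd)

    static struct ehci_hcd example;

    int main (void)
    {
            struct usb_hcd *hcd = &example.hcd;

            /* recover the containing ehci_hcd from its embedded hcd member */
            return (hcd_to_ehci (hcd) == &example) ? 0 : 1;
    }
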
@@ -626,10 +626,11 @@ static int ehci_urb_enqueue (
 	urb->transfer_flags &= ~EHCI_STATE_UNLINK;
 	INIT_LIST_HEAD (&qtd_list);
 
 	switch (usb_pipetype (urb->pipe)) {
-	case PIPE_CONTROL:
-	case PIPE_BULK:
+	// case PIPE_CONTROL:
+	// case PIPE_BULK:
+	default:
 		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
 			return -ENOMEM;
 		return submit_async (ehci, urb, &qtd_list, mem_flags);

@@ -649,9 +650,6 @@ static int ehci_urb_enqueue (
 		dbg ("no split iso support yet");
 		return -ENOSYS;
 #endif	/* have_split_iso */
-
-	default:	/* can't happen */
-		return -ENOSYS;
 	}
 }
@@ -665,15 +663,16 @@ static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
 	struct ehci_qh		*qh = (struct ehci_qh *) urb->hcpriv;
 	unsigned long		flags;
 
-	dbg ("%s urb_dequeue %p qh state %d",
-		hcd->self.bus_name, urb, qh->qh_state);
+	dbg ("%s urb_dequeue %p qh %p state %d",
+		hcd->self.bus_name, urb, qh, qh->qh_state);
 
 	switch (usb_pipetype (urb->pipe)) {
-	case PIPE_CONTROL:
-	case PIPE_BULK:
+	// case PIPE_CONTROL:
+	// case PIPE_BULK:
+	default:
 		spin_lock_irqsave (&ehci->lock, flags);
 		if (ehci->reclaim) {
 dbg ("dq: reclaim busy, %s", RUN_CONTEXT);
 			if (in_interrupt ()) {
 				spin_unlock_irqrestore (&ehci->lock, flags);
 				return -EAGAIN;

@@ -683,11 +682,8 @@ dbg ("dq: reclaim busy, %s", RUN_CONTEXT);
 					&& ehci->hcd.state != USB_STATE_HALT
 					) {
 				spin_unlock_irqrestore (&ehci->lock, flags);
-				// yeech ... this could spin for up to two frames!
-				dbg ("wait for dequeue: state %d, reclaim %p, hcd state %d",
-					qh->qh_state, ehci->reclaim, ehci->hcd.state
-				);
-				udelay (100);
+				/* let pending unlinks complete */
+				wait_ms (1);
 				spin_lock_irqsave (&ehci->lock, flags);
 			}
 		}

@@ -712,9 +708,9 @@ dbg ("wait for dequeue: state %d, reclaim %p, hcd state %d",
 		// wait till next completion, do it then.
 		// completion irqs can wait up to 1024 msec,
 		urb->transfer_flags |= EHCI_STATE_UNLINK;
-		return 0;
+		break;
 	}
-	return -EINVAL;
+	return 0;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -728,6 +724,7 @@ static void ehci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
 	int			i;
 	unsigned long		flags;
 
+	/* ASSERT: no requests/urbs are still linked (so no TDs) */
 	/* ASSERT: nobody can be submitting urbs for this any more */
 	dbg ("%s: free_config devnum %d", hcd->self.bus_name, udev->devnum);

@@ -736,34 +733,57 @@ static void ehci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
 	for (i = 0; i < 32; i++) {
 		if (dev->ep [i]) {
 			struct ehci_qh		*qh;
+			char			*why;
 
 			/* dev->ep never has ITDs or SITDs */
 			qh = (struct ehci_qh *) dev->ep [i];
-			vdbg ("free_config, ep 0x%02x qh %p", i, qh);
-			if (!list_empty (&qh->qtd_list)) {
-				dbg ("ep 0x%02x qh %p not empty!", i, qh);
+
+			/* detect/report non-recoverable errors */
+			if (in_interrupt ())
+				why = "disconnect() didn't";
+			else if ((qh->hw_info2 & cpu_to_le32 (0xffff)) != 0
+					&& qh->qh_state != QH_STATE_IDLE)
+				why = "(active periodic)";
+			else
+				why = 0;
+			if (why) {
+				err ("dev %s-%s ep %d-%s error: %s",
+					hcd->self.bus_name, udev->devpath,
+					i & 0xf, (i & 0x10) ? "IN" : "OUT",
+					why);
 				BUG ();
 			}
-			dev->ep [i] = 0;
 
-			/* wait_ms() won't spin here -- we're a thread */
+			dev->ep [i] = 0;
+			if (qh->qh_state == QH_STATE_IDLE)
+				goto idle;
+			dbg ("free_config, async ep 0x%02x qh %p", i, qh);
+
+			/* scan_async() empties the ring as it does its work,
+			 * using IAA, but doesn't (yet?) turn it off.  if it
+			 * doesn't empty this qh, likely it's the last entry.
+			 */
 			while (qh->qh_state == QH_STATE_LINKED
 					&& ehci->reclaim
 					&& ehci->hcd.state != USB_STATE_HALT
 					) {
 				spin_unlock_irqrestore (&ehci->lock, flags);
+				/* wait_ms() won't spin, we're a thread;
+				 * and we know IRQ+tasklet can progress
+				 */
 				wait_ms (1);
 				spin_lock_irqsave (&ehci->lock, flags);
 			}
-			if (qh->qh_state == QH_STATE_LINKED) {
-				start_unlink_async (ehci, qh);
-				while (qh->qh_state != QH_STATE_IDLE) {
-					spin_unlock_irqrestore (&ehci->lock,
-						flags);
-					wait_ms (1);
-					spin_lock_irqsave (&ehci->lock, flags);
-				}
-			}
+			if (qh->qh_state == QH_STATE_LINKED)
+				start_unlink_async (ehci, qh);
+			while (qh->qh_state != QH_STATE_IDLE
+					&& ehci->hcd.state != USB_STATE_HALT) {
+				spin_unlock_irqrestore (&ehci->lock,
+						flags);
+				wait_ms (1);
+				spin_lock_irqsave (&ehci->lock, flags);
+			}
+idle:
 			qh_put (ehci, qh);
 		}
 	}
...

(the diff of one more file is collapsed on the original page and is not shown here)
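
Both ehci_urb_dequeue() and ehci_free_config() above wait for a QH to change state by dropping the spinlock, sleeping a millisecond with wait_ms(), retaking the lock, and rechecking; the new comment in the diff spells out why: the IRQ/tasklet path that advances qh_state needs that same lock, so a busy-wait holding it could never make progress. A minimal user-space restatement of that pattern, using pthreads purely for illustration (none of these names exist in the driver):

    /* Sketch of the unlock/sleep/relock wait used above; the "completer"
     * thread stands in for the IRQ/tasklet completion path.
     */
    #include <pthread.h>
    #include <unistd.h>

    enum { QH_STATE_LINKED = 1, QH_STATE_UNLINK = 2, QH_STATE_IDLE = 3 };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int qh_state = QH_STATE_LINKED;

    static void *completer (void *arg)
    {
            (void) arg;
            usleep (2000);                  /* pretend the controller finishes */
            pthread_mutex_lock (&lock);
            qh_state = QH_STATE_IDLE;
            pthread_mutex_unlock (&lock);
            return 0;
    }

    int main (void)
    {
            pthread_t t;

            pthread_create (&t, 0, completer, 0);

            pthread_mutex_lock (&lock);
            while (qh_state != QH_STATE_IDLE) {
                    /* drop the lock so the completion side can take it */
                    pthread_mutex_unlock (&lock);
                    usleep (1000);          /* like wait_ms (1): sleep, don't spin */
                    pthread_mutex_lock (&lock);
            }
            pthread_mutex_unlock (&lock);

            pthread_join (t, 0);
            return 0;                       /* build with: cc -pthread sketch.c */
    }
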
@@ -241,10 +241,10 @@ static void intr_deschedule (
 	qh->qh_state = QH_STATE_UNLINK;
 	qh->qh_next.ptr = 0;
-	ehci->periodic_urbs--;
+	ehci->periodic_sched--;
 
 	/* maybe turn off periodic schedule */
-	if (!ehci->periodic_urbs)
+	if (!ehci->periodic_sched)
 		status = disable_periodic (ehci);
 	else {
 		status = 0;

@@ -266,13 +266,13 @@ static void intr_deschedule (
 	vdbg ("descheduled qh %p, per = %d frame = %d count = %d, urbs = %d",
 		qh, period, frame,
-		atomic_read (&qh->refcount), ehci->periodic_urbs);
+		atomic_read (&qh->refcount), ehci->periodic_sched);
 }
 
 static int check_period (
 	struct ehci_hcd	*ehci,
 	unsigned	frame,
-	int		uframe,
+	unsigned	uframe,
 	unsigned	period,
 	unsigned	usecs
 ) {
@@ -315,8 +315,7 @@ static int intr_submit (
 	struct list_head	*qtd_list,
 	int			mem_flags
 ) {
-	unsigned		epnum, period;
-	unsigned short		usecs, c_usecs, gap_uf;
+	unsigned		epnum;
 	unsigned long		flags;
 	struct ehci_qh		*qh;
 	struct hcd_dev		*dev;

@@ -329,43 +328,6 @@ static int intr_submit (
 	if (is_input)
 		epnum |= 0x10;
 
-	/*
-	 * HS interrupt transfers are simple -- only one microframe.  FS/LS
-	 * interrupt transfers involve a SPLIT in one microframe and CSPLIT
-	 * sometime later.  We need to know how much time each will be
-	 * needed in each microframe and, for FS/LS, how many microframes
-	 * separate the two in the best case.
-	 */
-	usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
-			urb->transfer_buffer_length);
-	if (urb->dev->speed == USB_SPEED_HIGH) {
-		gap_uf = 0;
-		c_usecs = 0;
-		/* FIXME handle HS periods of less than 1 frame. */
-		period = urb->interval >> 3;
-		if (period < 1) {
-			dbg ("intr period %d uframes, NYET!", urb->interval);
-			status = -EINVAL;
-			goto done;
-		}
-	} else {
-		/* gap is a function of full/low speed transfer times */
-		gap_uf = 1 + usb_calc_bus_time (urb->dev->speed, is_input, 0,
-				urb->transfer_buffer_length) / (125 * 1000);
-		/* FIXME this just approximates SPLIT/CSPLIT times */
-		if (is_input) {		// SPLIT, gap, CSPLIT+DATA
-			c_usecs = usecs + HS_USECS (0);
-			usecs = HS_USECS (1);
-		} else {		// SPLIT+DATA, gap, CSPLIT
-			usecs = usecs + HS_USECS (1);
-			c_usecs = HS_USECS (0);
-		}
-		period = urb->interval;
-	}
 
 	/*
 	 * NOTE: current completion/restart logic doesn't handle more than
 	 * one qtd in a periodic qh ... 16-20 KB/urb is pretty big for this.
@@ -423,19 +385,17 @@ static int intr_submit (
 	/* Schedule this periodic QH. */
 	if (likely (status == 0)) {
-		unsigned	frame = period;
+		unsigned	frame = qh->period;
 
 		qh->hw_next = EHCI_LIST_END;
-		qh->usecs = usecs;
-		qh->c_usecs = c_usecs;
 		urb->hcpriv = qh_get (qh);
 		status = -ENOSPC;
 
 		/* pick a set of schedule slots, link the QH into them */
 		do {
-			int		uframe;
+			unsigned	uframe;
 			u32		c_mask = 0;
 
 			/* pick a set of slots such that all uframes have
 			 * enough periodic bandwidth available.

@@ -443,7 +403,7 @@ static int intr_submit (
 			frame--;
 			for (uframe = 0; uframe < 8; uframe++) {
 				if (check_period (ehci, frame, uframe,
-						period, usecs) == 0)
+						qh->period, qh->usecs) == 0)
 					continue;
 
 				/* If this is a split transaction, check the

@@ -462,18 +422,18 @@ static int intr_submit (
 				 *
 				 * FIXME ehci 0.96 and above can use FSTNs
 				 */
-				if (!c_usecs)
+				if (!qh->c_usecs)
 					break;
 				if (check_period (ehci, frame,
-						uframe + gap_uf,
-						period, c_usecs) == 0)
+						uframe + qh->gap_uf,
+						qh->period, qh->c_usecs) == 0)
 					continue;
 				if (check_period (ehci, frame,
-						uframe + gap_uf + 1,
-						period, c_usecs) == 0)
+						uframe + qh->gap_uf + 1,
+						qh->period, qh->c_usecs) == 0)
 					continue;
 
-				c_mask = 0x03 << (8 + uframe + gap_uf);
+				c_mask = 0x03 << (8 + uframe + qh->gap_uf);
 				c_mask = cpu_to_le32 (c_mask);
 				break;
 			}

@@ -481,7 +441,7 @@ static int intr_submit (
 				continue;
 
 			/* QH will run once each period, starting there */
-			urb->start_frame = frame;
+			urb->start_frame = qh->start = frame;
 			status = 0;
 
 			/* reset S-frame and (maybe) C-frame masks */

@@ -491,8 +451,10 @@ static int intr_submit (
 			/* stuff into the periodic schedule */
 			qh->qh_state = QH_STATE_LINKED;
-			vdbg ("qh %p usecs %d period %d.0 starting %d.%d",
-				qh, qh->usecs, period, frame, uframe);
+			dbg ("qh %p usecs %d/%d period %d.0 "
+					"starting %d.%d (gap %d)",
+				qh, qh->usecs, qh->c_usecs, qh->period,
+				frame, uframe, qh->gap_uf);
 			do {
 				if (unlikely (ehci->pshadow [frame].ptr != 0)) {
 // FIXME -- just link toward the end, before any qh with a shorter period,

@@ -505,15 +467,15 @@ static int intr_submit (
 						QH_NEXT (qh->qh_dma);
 				}
 				wmb ();
-				frame += period;
+				frame += qh->period;
 			} while (frame < ehci->periodic_size);
 
 			/* update bandwidth utilization records (for usbfs) */
 			usb_claim_bandwidth (urb->dev, urb,
-				(usecs + c_usecs) / period, 0);
+				(qh->usecs + qh->c_usecs) / qh->period, 0);
 
 			/* maybe enable periodic schedule processing */
-			if (!ehci->periodic_urbs++)
+			if (!ehci->periodic_sched++)
 				status = enable_periodic (ehci);
 			break;
@@ -806,7 +768,7 @@ static int get_iso_range (
 	/* calculate the legal range [start,max) */
 	now = readl (&ehci->regs->frame_index) + 1;	/* next uframe */
-	if (!ehci->periodic_urbs)
+	if (!ehci->periodic_sched)
 		now += 8;	/* startup delay */
 	now %= mod;
 	end = now + mod;

@@ -926,7 +888,7 @@ itd_schedule (struct ehci_hcd *ehci, struct urb *urb)
 			usb_claim_bandwidth (urb->dev, urb, usecs, 1);
 
 			/* maybe enable periodic schedule processing */
-			if (!ehci->periodic_urbs++) {
+			if (!ehci->periodic_sched++) {
 				if ((status = enable_periodic (ehci)) != 0) {
 					// FIXME deschedule right away
 					err ("itd_schedule, enable = %d", status);

@@ -1009,8 +971,8 @@ itd_complete (
 	spin_lock_irqsave (&ehci->lock, flags);
 
 	/* defer stopping schedule; completion can submit */
-	ehci->periodic_urbs--;
-	if (!ehci->periodic_urbs)
+	ehci->periodic_sched--;
+	if (!ehci->periodic_sched)
 		(void) disable_periodic (ehci);
 
 	return flags;

...
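
Across the hunks above, the renamed ehci->periodic_sched is an activity count for everything on the periodic schedule (interrupt QHs and iso TDs, not urbs, hence the rename): submit paths increment it and turn the schedule on at the 0->1 transition, completion/deschedule paths decrement it and turn it off at 1->0. A standalone sketch of just that gating, with stub functions standing in for the driver's enable_periodic()/disable_periodic():

    /* Sketch only: the counting pattern, outside the driver.  The real
     * enable/disable helpers handshake with the controller; these stubs
     * and struct fake_ehci are illustrative names.
     */
    #include <stdio.h>

    struct fake_ehci {
            unsigned	periodic_sched;		/* periodic activity count */
    };

    static int enable_periodic (struct fake_ehci *ehci)
    {
            (void) ehci;
            printf ("periodic schedule on\n");
            return 0;
    }

    static int disable_periodic (struct fake_ehci *ehci)
    {
            (void) ehci;
            printf ("periodic schedule off\n");
            return 0;
    }

    /* called when an interrupt QH or an iso TD gets linked */
    static int periodic_link (struct fake_ehci *ehci)
    {
            if (!ehci->periodic_sched++)
                    return enable_periodic (ehci);
            return 0;
    }

    /* called when that activity completes or is descheduled */
    static int periodic_unlink (struct fake_ehci *ehci)
    {
            ehci->periodic_sched--;
            if (!ehci->periodic_sched)
                    return disable_periodic (ehci);
            return 0;
    }

    int main (void)
    {
            struct fake_ehci ehci = { 0 };

            periodic_link (&ehci);          /* 0 -> 1: schedule turned on */
            periodic_link (&ehci);          /* 1 -> 2: no hardware access */
            periodic_unlink (&ehci);        /* 2 -> 1: still on */
            periodic_unlink (&ehci);        /* 1 -> 0: schedule turned off */
            return 0;
    }
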
@@ -50,7 +50,7 @@ struct ehci_hcd {			/* one per controller */
 	union ehci_shadow	*pshadow;	/* mirror hw periodic table */
 	int			next_uframe;	/* scan periodic, start here */
-	unsigned		periodic_urbs;	/* how many urbs scheduled? */
+	unsigned		periodic_sched;	/* periodic activity count */
 
 	/* deferred work from IRQ, etc */
 	struct tasklet_struct	tasklet;

@@ -72,7 +72,7 @@ struct ehci_hcd {			/* one per controller */
 };
 
 /* unwrap an HCD pointer to get an EHCI_HCD pointer */
-#define hcd_to_ehci(hcd_ptr) list_entry(hcd_ptr, struct ehci_hcd, hcd)
+#define hcd_to_ehci(hcd_ptr) container_of(hcd_ptr, struct ehci_hcd, hcd)
 
 /* NOTE: urb->transfer_flags expected to not use this bit !!! */
 #define EHCI_STATE_UNLINK	0x8000		/* urb being unlinked */

@@ -287,12 +287,19 @@ struct ehci_qh {
 	struct list_head	qtd_list;	/* sw qtd list */
 	atomic_t		refcount;
 
-	unsigned short		usecs;		/* intr bandwidth */
-	unsigned short		c_usecs;	/* ... split completion bw */
-	short			qh_state;
+	u8			qh_state;
#define	QH_STATE_LINKED		1		/* HC sees this */
#define	QH_STATE_UNLINK		2		/* HC may still see this */
#define	QH_STATE_IDLE		3		/* HC doesn't see this */
+
+	/* periodic schedule info */
+	u8			usecs;		/* intr bandwidth */
+	u8			gap_uf;		/* uframes split/csplit gap */
+	u8			c_usecs;	/* ... split completion bw */
+	unsigned short		period;		/* polling interval */
+	unsigned short		start;		/* where polling starts */
 } __attribute__ ((aligned (32)));
 
 /*-------------------------------------------------------------------------*/

...
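
The new struct ehci_qh fields hold exactly the values that the deleted block in intr_submit() used to recompute on every submission (usecs, c_usecs, gap_uf, period). The one-time setup now happens in code whose diff is not shown on this page, so the following is only a hedged sketch restating the removed computation; sketch_qh, sketch_init_intr_sched() and the pre-computed bus-time arguments are stand-ins for usb_calc_bus_time() and HS_USECS(), which are not reproduced here:

    /* Hedged sketch: fill the per-QH scheduling fields once, restated from
     * the code removed from intr_submit().  hs_usecs is what
     * usb_calc_bus_time(USB_SPEED_HIGH, ...) would return, fs_ls_ns the
     * full/low-speed bus time in nanoseconds, and hs_usecs_0/hs_usecs_1
     * stand in for the HS_USECS(0)/HS_USECS(1) overheads.
     */
    #include <stdint.h>

    struct sketch_qh {                      /* just the new scheduling fields */
            uint8_t        usecs;           /* intr bandwidth */
            uint8_t        gap_uf;          /* uframes split/csplit gap */
            uint8_t        c_usecs;         /* ... split completion bw */
            unsigned short period;          /* polling interval, frames */
            unsigned short start;           /* where polling starts */
    };

    int sketch_init_intr_sched (struct sketch_qh *qh, int is_input,
                    int is_highspeed, unsigned interval,
                    unsigned hs_usecs, unsigned fs_ls_ns,
                    unsigned hs_usecs_0, unsigned hs_usecs_1)
    {
            if (is_highspeed) {
                    /* HS interrupt: one microframe, no split/csplit */
                    qh->gap_uf = 0;
                    qh->c_usecs = 0;
                    qh->usecs = hs_usecs;
                    qh->period = interval >> 3;     /* uframes -> frames */
                    if (qh->period < 1)
                            return -1;      /* sub-frame periods not handled */
            } else {
                    /* SPLIT/CSPLIT gap tracks the FS/LS transfer time */
                    qh->gap_uf = 1 + fs_ls_ns / (125 * 1000);

                    /* approximate SPLIT/CSPLIT budgets, as the removed code did */
                    if (is_input) {                 /* SPLIT, gap, CSPLIT+DATA */
                            qh->c_usecs = hs_usecs + hs_usecs_0;
                            qh->usecs = hs_usecs_1;
                    } else {                        /* SPLIT+DATA, gap, CSPLIT */
                            qh->usecs = hs_usecs + hs_usecs_1;
                            qh->c_usecs = hs_usecs_0;
                    }
                    qh->period = interval;
            }
            qh->start = 0;                  /* chosen later, when scheduled */
            return 0;
    }

With the values cached this way, intr_submit() only reads qh->period, qh->usecs, qh->c_usecs and qh->gap_uf while hunting for schedule slots, as the hunks above now do.
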