Commit 39c5c63d authored by David Brownell, committed by Greg Kroah-Hartman

[PATCH] ehci updates

This patch is the first part of fixing the EHCI driver to queue
interrupt transactions, handle larger requests, and basically treat
interrupt as just "bulk that lives on the periodic schedule".  One
more patch should wrap that up.

     qh processing cleanup
	- split "append tds to qh" logic out of "put on async schedule",
	  so it can be used with "put on periodic schedule" too
     interrupt transfer cleanup
	- save rest of scheduling params in the qh
	- calculate scheduling params only once
     other cleanup
	- use new container_of()
	- minor code shrinkage (avoid pipe bitops, conditionals, etc)
	- rename variable (will track endpoints, not urbs)
	- free_config() logic
parent 4344c76c
......@@ -626,10 +626,11 @@ static int ehci_urb_enqueue (
urb->transfer_flags &= ~EHCI_STATE_UNLINK;
INIT_LIST_HEAD (&qtd_list);
switch (usb_pipetype (urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
switch (usb_pipetype (urb->pipe)) {
// case PIPE_CONTROL:
// case PIPE_BULK:
default:
if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
return -ENOMEM;
return submit_async (ehci, urb, &qtd_list, mem_flags);
......@@ -649,9 +650,6 @@ static int ehci_urb_enqueue (
dbg ("no split iso support yet");
return -ENOSYS;
#endif /* have_split_iso */
default: /* can't happen */
return -ENOSYS;
}
}
......@@ -665,15 +663,16 @@ static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
unsigned long flags;
dbg ("%s urb_dequeue %p qh state %d",
hcd->self.bus_name, urb, qh->qh_state);
dbg ("%s urb_dequeue %p qh %p state %d",
hcd->self.bus_name, urb, qh, qh->qh_state);
switch (usb_pipetype (urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
// case PIPE_CONTROL:
// case PIPE_BULK:
default:
spin_lock_irqsave (&ehci->lock, flags);
if (ehci->reclaim) {
dbg ("dq: reclaim busy, %s", RUN_CONTEXT);
dbg ("dq: reclaim busy, %s", RUN_CONTEXT);
if (in_interrupt ()) {
spin_unlock_irqrestore (&ehci->lock, flags);
return -EAGAIN;
......@@ -683,11 +682,8 @@ dbg ("dq: reclaim busy, %s", RUN_CONTEXT);
&& ehci->hcd.state != USB_STATE_HALT
) {
spin_unlock_irqrestore (&ehci->lock, flags);
// yeech ... this could spin for up to two frames!
dbg ("wait for dequeue: state %d, reclaim %p, hcd state %d",
qh->qh_state, ehci->reclaim, ehci->hcd.state
);
udelay (100);
/* let pending unlinks complete */
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
}
......@@ -712,9 +708,9 @@ dbg ("wait for dequeue: state %d, reclaim %p, hcd state %d",
// wait till next completion, do it then.
// completion irqs can wait up to 1024 msec,
urb->transfer_flags |= EHCI_STATE_UNLINK;
return 0;
break;
}
return -EINVAL;
return 0;
}
/*-------------------------------------------------------------------------*/
......@@ -728,6 +724,7 @@ static void ehci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
int i;
unsigned long flags;
/* ASSERT: no requests/urbs are still linked (so no TDs) */
/* ASSERT: nobody can be submitting urbs for this any more */
dbg ("%s: free_config devnum %d", hcd->self.bus_name, udev->devnum);
......@@ -736,34 +733,57 @@ static void ehci_free_config (struct usb_hcd *hcd, struct usb_device *udev)
for (i = 0; i < 32; i++) {
if (dev->ep [i]) {
struct ehci_qh *qh;
char *why;
/* dev->ep never has ITDs or SITDs */
qh = (struct ehci_qh *) dev->ep [i];
vdbg ("free_config, ep 0x%02x qh %p", i, qh);
if (!list_empty (&qh->qtd_list)) {
dbg ("ep 0x%02x qh %p not empty!", i, qh);
/* detect/report non-recoverable errors */
if (in_interrupt ())
why = "disconnect() didn't";
else if ((qh->hw_info2 & cpu_to_le32 (0xffff)) != 0
&& qh->qh_state != QH_STATE_IDLE)
why = "(active periodic)";
else
why = 0;
if (why) {
err ("dev %s-%s ep %d-%s error: %s",
hcd->self.bus_name, udev->devpath,
i & 0xf, (i & 0x10) ? "IN" : "OUT",
why);
BUG ();
}
dev->ep [i] = 0;
/* wait_ms() won't spin here -- we're a thread */
dev->ep [i] = 0;
if (qh->qh_state == QH_STATE_IDLE)
goto idle;
dbg ("free_config, async ep 0x%02x qh %p", i, qh);
/* scan_async() empties the ring as it does its work,
* using IAA, but doesn't (yet?) turn it off. if it
* doesn't empty this qh, likely it's the last entry.
*/
while (qh->qh_state == QH_STATE_LINKED
&& ehci->reclaim
&& ehci->hcd.state != USB_STATE_HALT
) {
spin_unlock_irqrestore (&ehci->lock, flags);
/* wait_ms() won't spin, we're a thread;
* and we know IRQ+tasklet can progress
*/
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
if (qh->qh_state == QH_STATE_LINKED) {
if (qh->qh_state == QH_STATE_LINKED)
start_unlink_async (ehci, qh);
while (qh->qh_state != QH_STATE_IDLE) {
spin_unlock_irqrestore (&ehci->lock,
flags);
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
while (qh->qh_state != QH_STATE_IDLE
&& ehci->hcd.state != USB_STATE_HALT) {
spin_unlock_irqrestore (&ehci->lock,
flags);
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
idle:
qh_put (ehci, qh);
}
}
......
......@@ -47,9 +47,11 @@ static int
qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, int token)
{
int i, count;
u64 addr = buf;
/* one buffer entry per 4K ... first might be short or unaligned */
qtd->hw_buf [0] = cpu_to_le32 (buf);
qtd->hw_buf [0] = cpu_to_le32 ((u32)addr);
qtd->hw_buf_hi [0] = cpu_to_le32 ((u32)(addr >> 32));
count = 0x1000 - (buf & 0x0fff); /* rest of that page */
if (likely (len < count)) /* ... iff needed */
count = len;
......@@ -59,7 +61,7 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, int token)
/* per-qtd limit: from 16K to 20K (best alignment) */
for (i = 1; count < len && i < 5; i++) {
u64 addr = buf;
addr = buf;
qtd->hw_buf [i] = cpu_to_le32 ((u32)addr);
qtd->hw_buf_hi [i] = cpu_to_le32 ((u32)(addr >> 32));
buf += 0x1000;
......@@ -312,7 +314,7 @@ qh_completions (
/* unlink the rest? once we start unlinking, after
* a fault or explicit unlink, we unlink all later
* urbs. usb spec requires that.
* urbs. usb spec requires that for faults...
*/
if (unlink && urb->status == -EINPROGRESS)
urb->status = -ECONNRESET;
......@@ -450,6 +452,7 @@ qh_urb_transaction (
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf, map_buf;
int len, maxpacket;
int is_input;
u32 token;
/*
......@@ -495,10 +498,11 @@ qh_urb_transaction (
* data transfer stage: buffer setup
*/
len = urb->transfer_buffer_length;
is_input = usb_pipein (urb->pipe);
if (likely (len > 0)) {
buf = map_buf = pci_map_single (ehci->hcd.pdev,
urb->transfer_buffer, len,
usb_pipein (urb->pipe)
is_input
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE);
if (unlikely (!buf))
......@@ -506,12 +510,11 @@ qh_urb_transaction (
} else
buf = map_buf = 0;
if (!buf || usb_pipein (urb->pipe))
if (!buf || is_input)
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
maxpacket = usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe));
maxpacket = usb_maxpacket (urb->dev, urb->pipe, !is_input) & 0x03ff;
/*
* buffer gets wrapped in one or more qtds;
......@@ -607,6 +610,11 @@ clear_toggle (struct usb_device *udev, int ep, int is_out, struct ehci_qh *qh)
// That'd mean updating how usbcore talks to HCDs. (2.5?)
// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x03ff)
/*
* Each QH holds a qtd list; a QH is used for everything except iso.
*
......@@ -624,6 +632,8 @@ ehci_qh_make (
) {
struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
u32 info1 = 0, info2 = 0;
int is_input, type;
int maxp = 0;
if (!qh)
return qh;
......@@ -634,6 +644,53 @@ ehci_qh_make (
info1 |= usb_pipeendpoint (urb->pipe) << 8;
info1 |= usb_pipedevice (urb->pipe) << 0;
is_input = usb_pipein (urb->pipe);
type = usb_pipetype (urb->pipe);
maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
/* Compute interrupt scheduling parameters just once, and save.
* - allowing for high bandwidth, how many nsec/uframe are used?
* - split transactions need a second CSPLIT uframe; same question
* - splits also need a schedule gap (for full/low speed I/O)
* - qh has a polling interval
*
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
qh->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
hb_mult (maxp) * max_packet (maxp));
qh->start = ~0;
if (urb->dev->speed == USB_SPEED_HIGH) {
qh->c_usecs = 0;
qh->gap_uf = 0;
/* FIXME handle HS periods of less than 1 frame. */
qh->period = urb->interval >> 3;
if (qh->period < 1) {
dbg ("intr period %d uframes, NYET!",
urb->interval);
qh = 0;
goto done;
}
} else {
/* gap is f(FS/LS transfer times) */
qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
is_input, 0, maxp) / (125 * 1000);
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
qh->c_usecs = qh->usecs + HS_USECS (0);
qh->usecs = HS_USECS (1);
} else { // SPLIT+DATA, gap, CSPLIT
qh->usecs += HS_USECS (1);
qh->c_usecs = HS_USECS (0);
}
qh->period = urb->interval;
}
}
/* using TT? */
switch (urb->dev->speed) {
case USB_SPEED_LOW:
......@@ -643,49 +700,42 @@ ehci_qh_make (
case USB_SPEED_FULL:
/* EPS 0 means "full" */
info1 |= (EHCI_TUNE_RL_TT << 28);
if (usb_pipecontrol (urb->pipe)) {
if (type == PIPE_CONTROL) {
info1 |= (1 << 27); /* for TT */
info1 |= 1 << 14; /* toggle from qtd */
}
info1 |= usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe)) << 16;
info1 |= maxp << 16;
info2 |= (EHCI_TUNE_MULT_TT << 30);
info2 |= urb->dev->ttport << 23;
info2 |= urb->dev->tt->hub->devnum << 16;
/* NOTE: if (usb_pipeint (urb->pipe)) { scheduler sets c-mask }
* ... and a 0.96 scheduler might use FSTN nodes too
*/
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
break;
case USB_SPEED_HIGH: /* no TT involved */
info1 |= (2 << 12); /* EPS "high" */
info1 |= (EHCI_TUNE_RL_HS << 28);
if (usb_pipecontrol (urb->pipe)) {
if (type == PIPE_CONTROL) {
info1 |= 64 << 16; /* usb2 fixed maxpacket */
info1 |= 1 << 14; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (usb_pipebulk (urb->pipe)) {
} else if (type == PIPE_BULK) {
info1 |= 512 << 16; /* usb2 fixed maxpacket */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else {
u32 temp;
temp = usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe));
info1 |= (temp & 0x3ff) << 16; /* maxpacket */
/* HS intr can be "high bandwidth" */
temp = 1 + ((temp >> 11) & 0x03);
info2 |= temp << 30; /* mult */
} else { /* PIPE_INTERRUPT */
info1 |= max_packet (maxp) << 16;
info2 |= hb_mult (maxp) << 30;
}
break;
default:
#ifdef DEBUG
default:
BUG ();
#endif
}
/* NOTE: if (usb_pipeint (urb->pipe)) { scheduler sets s-mask } */
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
qh->qh_state = QH_STATE_IDLE;
qh->hw_info1 = cpu_to_le32 (info1);
......@@ -696,14 +746,13 @@ ehci_qh_make (
qh_update (qh, list_entry (qtd_list->next, struct ehci_qtd, qtd_list));
/* initialize data toggle state */
if (!usb_pipecontrol (urb->pipe))
clear_toggle (urb->dev,
usb_pipeendpoint (urb->pipe),
usb_pipeout (urb->pipe),
qh);
clear_toggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, qh);
done:
return qh;
}
#undef hb_mult
#undef hb_packet
/*-------------------------------------------------------------------------*/
......@@ -745,35 +794,29 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
/*-------------------------------------------------------------------------*/
static int
submit_async (
/*
* For control/bulk/interrupt, return QH with these TDs appended.
* Allocates and initializes the QH if necessary.
* Returns null if it can't allocate a QH it needs to.
* If the QH has TDs (urbs) already, that's great.
*/
static struct ehci_qh *qh_append_tds (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int mem_flags
) {
struct ehci_qtd *qtd;
struct hcd_dev *dev;
int epnum;
unsigned long flags;
int epnum,
void **ptr
)
{
struct ehci_qh *qh = 0;
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
dev = (struct hcd_dev *)urb->dev->hcpriv;
epnum = usb_pipeendpoint (urb->pipe);
if (usb_pipein (urb->pipe))
epnum |= 0x10;
vdbg ("%s: submit_async urb %p len %d ep %d-%s qtd %p [qh %p]",
ehci->hcd.self.bus_name, urb, urb->transfer_buffer_length,
epnum & 0x0f, (epnum & 0x10) ? "in" : "out",
qtd, dev ? dev->ep [epnum] : (void *)~0);
spin_lock_irqsave (&ehci->lock, flags);
qh = (struct ehci_qh *) dev->ep [epnum];
qh = (struct ehci_qh *) *ptr;
if (likely (qh != 0)) {
u32 hw_next = QTD_NEXT (qtd->qtd_dma);
struct ehci_qtd *qtd;
u32 hw_next;
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
hw_next = QTD_NEXT (qtd->qtd_dma);
/* maybe patch the qh used for set_address */
if (unlikely (epnum == 0
......@@ -803,6 +846,7 @@ submit_async (
* Interrupt code must cope with case of HC having it
* cached, and clobbering these updates.
* ... complicates getting rid of extra interrupts!
* (Or: use dummy td, so cache always stays valid.)
*/
if (qh->hw_current == cpu_to_le32 (last_qtd->qtd_dma)) {
wmb ();
......@@ -822,8 +866,7 @@ submit_async (
*/
/* usb_clear_halt() means qh data toggle gets reset */
if (usb_pipebulk (urb->pipe)
&& unlikely (!usb_gettoggle (urb->dev,
if (unlikely (!usb_gettoggle (urb->dev,
(epnum & 0x0f),
!(epnum & 0x10)))) {
clear_toggle (urb->dev,
......@@ -836,17 +879,47 @@ submit_async (
} else {
/* can't sleep here, we have ehci->lock... */
qh = ehci_qh_make (ehci, urb, qtd_list, SLAB_ATOMIC);
if (likely (qh != 0)) {
// dbg_qh ("new qh", ehci, qh);
dev->ep [epnum] = qh;
}
// if (qh) dbg_qh ("new qh", ehci, qh);
*ptr = qh;
}
if (qh)
urb->hcpriv = qh_get (qh);
return qh;
}
/*-------------------------------------------------------------------------*/
static int
submit_async (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int mem_flags
) {
struct ehci_qtd *qtd;
struct hcd_dev *dev;
int epnum;
unsigned long flags;
struct ehci_qh *qh = 0;
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
dev = (struct hcd_dev *)urb->dev->hcpriv;
epnum = usb_pipeendpoint (urb->pipe);
if (usb_pipein (urb->pipe))
epnum |= 0x10;
vdbg ("%s: submit_async urb %p len %d ep %d-%s qtd %p [qh %p]",
ehci->hcd.self.bus_name, urb, urb->transfer_buffer_length,
epnum & 0x0f, (epnum & 0x10) ? "in" : "out",
qtd, dev ? dev->ep [epnum] : (void *)~0);
spin_lock_irqsave (&ehci->lock, flags);
qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
/* Control/bulk operations through TTs don't need scheduling,
* the HC and TT handle it when the TT has a buffer ready.
*/
if (likely (qh != 0)) {
urb->hcpriv = qh_get (qh);
if (likely (qh->qh_state == QH_STATE_IDLE))
qh_link_async (ehci, qh_get (qh));
}
......@@ -979,7 +1052,9 @@ static void scan_async (struct ehci_hcd *ehci)
qh_put (ehci, qh);
}
/* unlink idle entries (reduces PCI usage) */
/* unlink idle entries, reducing HC PCI usage as
* well as HCD schedule-scanning costs
*/
if (list_empty (&qh->qtd_list) && !ehci->reclaim) {
if (qh->qh_next.qh != qh) {
// dbg ("irq/empty");
......@@ -987,6 +1062,7 @@ static void scan_async (struct ehci_hcd *ehci)
} else {
// FIXME: arrange to stop
// after it's been idle a while.
// stop/restart isn't free...
}
}
qh = qh->qh_next.qh;
......
......@@ -241,10 +241,10 @@ static void intr_deschedule (
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = 0;
ehci->periodic_urbs--;
ehci->periodic_sched--;
/* maybe turn off periodic schedule */
if (!ehci->periodic_urbs)
if (!ehci->periodic_sched)
status = disable_periodic (ehci);
else {
status = 0;
......@@ -266,13 +266,13 @@ static void intr_deschedule (
vdbg ("descheduled qh %p, per = %d frame = %d count = %d, urbs = %d",
qh, period, frame,
atomic_read (&qh->refcount), ehci->periodic_urbs);
atomic_read (&qh->refcount), ehci->periodic_sched);
}
static int check_period (
struct ehci_hcd *ehci,
unsigned frame,
int uframe,
unsigned uframe,
unsigned period,
unsigned usecs
) {
......@@ -315,8 +315,7 @@ static int intr_submit (
struct list_head *qtd_list,
int mem_flags
) {
unsigned epnum, period;
unsigned short usecs, c_usecs, gap_uf;
unsigned epnum;
unsigned long flags;
struct ehci_qh *qh;
struct hcd_dev *dev;
......@@ -329,43 +328,6 @@ static int intr_submit (
if (is_input)
epnum |= 0x10;
/*
* HS interrupt transfers are simple -- only one microframe. FS/LS
* interrupt transfers involve a SPLIT in one microframe and CSPLIT
* sometime later. We need to know how much time each will be
* needed in each microframe and, for FS/LS, how many microframes
* separate the two in the best case.
*/
usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
urb->transfer_buffer_length);
if (urb->dev->speed == USB_SPEED_HIGH) {
gap_uf = 0;
c_usecs = 0;
/* FIXME handle HS periods of less than 1 frame. */
period = urb->interval >> 3;
if (period < 1) {
dbg ("intr period %d uframes, NYET!", urb->interval);
status = -EINVAL;
goto done;
}
} else {
/* gap is a function of full/low speed transfer times */
gap_uf = 1 + usb_calc_bus_time (urb->dev->speed, is_input, 0,
urb->transfer_buffer_length) / (125 * 1000);
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
c_usecs = usecs + HS_USECS (0);
usecs = HS_USECS (1);
} else { // SPLIT+DATA, gap, CSPLIT
usecs = usecs + HS_USECS (1);
c_usecs = HS_USECS (0);
}
period = urb->interval;
}
/*
* NOTE: current completion/restart logic doesn't handle more than
* one qtd in a periodic qh ... 16-20 KB/urb is pretty big for this.
......@@ -423,19 +385,17 @@ static int intr_submit (
/* Schedule this periodic QH. */
if (likely (status == 0)) {
unsigned frame = period;
unsigned frame = qh->period;
qh->hw_next = EHCI_LIST_END;
qh->usecs = usecs;
qh->c_usecs = c_usecs;
urb->hcpriv = qh_get (qh);
status = -ENOSPC;
/* pick a set of schedule slots, link the QH into them */
do {
int uframe;
u32 c_mask = 0;
unsigned uframe;
u32 c_mask = 0;
/* pick a set of slots such that all uframes have
* enough periodic bandwidth available.
......@@ -443,7 +403,7 @@ static int intr_submit (
frame--;
for (uframe = 0; uframe < 8; uframe++) {
if (check_period (ehci, frame, uframe,
period, usecs) == 0)
qh->period, qh->usecs) == 0)
continue;
/* If this is a split transaction, check the
......@@ -462,18 +422,18 @@ static int intr_submit (
*
* FIXME ehci 0.96 and above can use FSTNs
*/
if (!c_usecs)
if (!qh->c_usecs)
break;
if (check_period (ehci, frame,
uframe + gap_uf,
period, c_usecs) == 0)
uframe + qh->gap_uf,
qh->period, qh->c_usecs) == 0)
continue;
if (check_period (ehci, frame,
uframe + gap_uf + 1,
period, c_usecs) == 0)
uframe + qh->gap_uf + 1,
qh->period, qh->c_usecs) == 0)
continue;
c_mask = 0x03 << (8 + uframe + gap_uf);
c_mask = 0x03 << (8 + uframe + qh->gap_uf);
c_mask = cpu_to_le32 (c_mask);
break;
}
......@@ -481,7 +441,7 @@ static int intr_submit (
continue;
/* QH will run once each period, starting there */
urb->start_frame = frame;
urb->start_frame = qh->start = frame;
status = 0;
/* reset S-frame and (maybe) C-frame masks */
......@@ -491,8 +451,10 @@ static int intr_submit (
/* stuff into the periodic schedule */
qh->qh_state = QH_STATE_LINKED;
vdbg ("qh %p usecs %d period %d.0 starting %d.%d",
qh, qh->usecs, period, frame, uframe);
dbg ("qh %p usecs %d/%d period %d.0 "
"starting %d.%d (gap %d)",
qh, qh->usecs, qh->c_usecs, qh->period,
frame, uframe, qh->gap_uf);
do {
if (unlikely (ehci->pshadow [frame].ptr != 0)) {
// FIXME -- just link toward the end, before any qh with a shorter period,
......@@ -505,15 +467,15 @@ static int intr_submit (
QH_NEXT (qh->qh_dma);
}
wmb ();
frame += period;
frame += qh->period;
} while (frame < ehci->periodic_size);
/* update bandwidth utilization records (for usbfs) */
usb_claim_bandwidth (urb->dev, urb,
(usecs + c_usecs) / period, 0);
(qh->usecs + qh->c_usecs) / qh->period, 0);
/* maybe enable periodic schedule processing */
if (!ehci->periodic_urbs++)
if (!ehci->periodic_sched++)
status = enable_periodic (ehci);
break;
......@@ -806,7 +768,7 @@ static int get_iso_range (
/* calculate the legal range [start,max) */
now = readl (&ehci->regs->frame_index) + 1; /* next uframe */
if (!ehci->periodic_urbs)
if (!ehci->periodic_sched)
now += 8; /* startup delay */
now %= mod;
end = now + mod;
......@@ -926,7 +888,7 @@ itd_schedule (struct ehci_hcd *ehci, struct urb *urb)
usb_claim_bandwidth (urb->dev, urb, usecs, 1);
/* maybe enable periodic schedule processing */
if (!ehci->periodic_urbs++) {
if (!ehci->periodic_sched++) {
if ((status = enable_periodic (ehci)) != 0) {
// FIXME deschedule right away
err ("itd_schedule, enable = %d", status);
......@@ -1009,8 +971,8 @@ itd_complete (
spin_lock_irqsave (&ehci->lock, flags);
/* defer stopping schedule; completion can submit */
ehci->periodic_urbs--;
if (!ehci->periodic_urbs)
ehci->periodic_sched--;
if (!ehci->periodic_sched)
(void) disable_periodic (ehci);
return flags;
......
......@@ -50,7 +50,7 @@ struct ehci_hcd { /* one per controller */
union ehci_shadow *pshadow; /* mirror hw periodic table */
int next_uframe; /* scan periodic, start here */
unsigned periodic_urbs; /* how many urbs scheduled? */
unsigned periodic_sched; /* periodic activity count */
/* deferred work from IRQ, etc */
struct tasklet_struct tasklet;
......@@ -72,7 +72,7 @@ struct ehci_hcd { /* one per controller */
};
/* unwrap an HCD pointer to get an EHCI_HCD pointer */
#define hcd_to_ehci(hcd_ptr) list_entry(hcd_ptr, struct ehci_hcd, hcd)
#define hcd_to_ehci(hcd_ptr) container_of(hcd_ptr, struct ehci_hcd, hcd)
/* NOTE: urb->transfer_flags expected to not use this bit !!! */
#define EHCI_STATE_UNLINK 0x8000 /* urb being unlinked */
......@@ -287,12 +287,19 @@ struct ehci_qh {
struct list_head qtd_list; /* sw qtd list */
atomic_t refcount;
unsigned short usecs; /* intr bandwidth */
unsigned short c_usecs; /* ... split completion bw */
short qh_state;
u8 qh_state;
#define QH_STATE_LINKED 1 /* HC sees this */
#define QH_STATE_UNLINK 2 /* HC may still see this */
#define QH_STATE_IDLE 3 /* HC doesn't see this */
/* periodic schedule info */
u8 usecs; /* intr bandwidth */
u8 gap_uf; /* uframes split/csplit gap */
u8 c_usecs; /* ... split completion bw */
unsigned short period; /* polling interval */
unsigned short start; /* where polling starts */
} __attribute__ ((aligned (32)));
/*-------------------------------------------------------------------------*/
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment