Commit b92eb850 authored by David Brownell, committed by Greg Kroah-Hartman

[PATCH] ohci-hcd endpoint scheduling, driverfs

This patch cleans up some messy parts of this driver, and
was pleasantly painless.

      - gets rid of the ED dma hashtables
         * less memory needed
         * also less (and faster) code
         * all ED scheduling ops are rewritten; they now use
           CPU addresses, as EHCI and UHCI already do
           (see the second sketch below)

      - simplifies ED scheduling (no dma hashtables)
         * control and bulk lists are now doubly linked
         * periodic tree is still singly linked; the driver now
           keeps a CPU-side "shadow" of the hardware framelist
         * the previous periodic code was cryptic, and in
           practice read-only
         * simpler tree code for EDs with {branch, period}

      - fixes bugs in periodic scheduling
         * when CONFIG_USB_BANDWIDTH is set, checks the per-frame
           load against the limit; no more dodgy accounting
           (see the first sketch, after this list)
         * handles iso period != 1; interrupt and iso EDs are
           scheduled by the same routine (the HW sees special TDs)
         * credits usbfs with bandwidth per endpoint, not per URB

      - adds driverfs output (when CONFIG_USB_DEBUG)
         * resembles EHCI:  'async' (control+bulk) and
           'periodic' (interrupt+iso) files show schedules
         * shows only queue heads (EDs) for now (*)

      - minor text and code cleanups, etc.
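
For illustration only (this is not part of the patch): a minimal,
standalone sketch of the new periodic bookkeeping.  NUM_INTS and the
per-frame load[] array mirror names used in the patch; the 900 usec
limit is USB 1.1's ~90% of a 1 msec frame; everything else here is
simplified.

    #include <stdio.h>

    #define NUM_INTS        32      /* frames in the periodic schedule */
    #define FRAME_LIMIT_US  900     /* USB 1.1: ~90% of a 1 msec frame */

    static int load[NUM_INTS];      /* usecs already reserved, per frame */

    /* pick the least loaded branch for this interval, or -1 if adding
     * ed_load usecs would overcommit any frame the ED will touch
     */
    static int balance(int interval, int ed_load)
    {
        int i, j, branch = -1;

        if (interval > NUM_INTS)
            interval = NUM_INTS;
        for (i = 0; i < interval; i++) {
            if (branch >= 0 && load[branch] <= load[i])
                continue;
            for (j = i; j < NUM_INTS; j += interval)
                if (load[j] + ed_load > FRAME_LIMIT_US)
                    break;
            if (j >= NUM_INTS)
                branch = i;
        }
        return branch;
    }

    /* a scheduled ED then occupies every (branch + n*interval)-th frame */
    static void claim(int branch, int interval, int ed_load)
    {
        int i;

        for (i = branch; i < NUM_INTS; i += interval)
            load[i] += ed_load;
    }

    int main(void)
    {
        int branch = balance(8, 100);   /* e.g. an 8 msec interrupt ED */

        if (branch < 0)
            return 1;                   /* no bandwidth left */
        claim(branch, 8, 100);
        printf("scheduled on branch %d\n", branch);
        return 0;
    }

The patch's balance() enforces the 900 usec check only when
CONFIG_USB_BANDWIDTH is set, and its periodic_link() additionally
chains the ED into the hardware int_table and the CPU-side
periodic[] shadow.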

Now that this logic has morphed into a more comprehensible
form, I know what to borrow for the EHCI code!
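
A second standalone sketch (again illustrative, not the driver code)
of why dropping the ED dma hashtables pays off: with ed_next/ed_prev
kept as CPU pointers, unlinking a control or bulk ED becomes plain
doubly-linked-list surgery instead of the old dma_to_ed() reverse
lookup.  The hardware-visible hwNextED and head-register updates that
the real ed_schedule()/ed_deschedule() also perform are omitted here.

    #include <stddef.h>

    /* stripped-down ED; the real one also has hwINFO, hwHeadP, and a
     * little-endian hwNextED that the controller itself follows
     */
    struct ed {
        struct ed *ed_next;     /* CPU pointers, doubly linked */
        struct ed *ed_prev;
    };

    /* append at the tail of the control (or bulk) schedule */
    static void ed_link(struct ed **tail, struct ed *ed)
    {
        ed->ed_next = NULL;
        ed->ed_prev = *tail;
        if (*tail)
            (*tail)->ed_next = ed;
        *tail = ed;
    }

    /* unlink with no dma-to-virtual reverse mapping at all */
    static void ed_unlink(struct ed **tail, struct ed *ed)
    {
        if (ed->ed_prev)
            ed->ed_prev->ed_next = ed->ed_next;
        if (ed->ed_next)
            ed->ed_next->ed_prev = ed->ed_prev;
        if (*tail == ed)
            *tail = ed->ed_prev;
        ed->ed_next = ed->ed_prev = NULL;
    }

    int main(void)
    {
        struct ed a = { NULL, NULL }, b = { NULL, NULL };
        struct ed *ed_controltail = NULL;

        ed_link(&ed_controltail, &a);
        ed_link(&ed_controltail, &b);
        ed_unlink(&ed_controltail, &a); /* no hashtable lookup needed */
        return (ed_controltail == &b && b.ed_prev == NULL) ? 0 : 1;
    }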


     (*) It shows TDs on the td_list, but this patch won't
         put them there.  A queue fault handling update will.
parent d5614a96
@@ -72,37 +72,6 @@ static void urb_print (struct urb * urb, char * str, int small)
#endif
}
static inline struct ed *
dma_to_ed (struct ohci_hcd *hc, dma_addr_t ed_dma);
/* print non-empty branches of the periodic ed tree */
static void __attribute__ ((unused))
ohci_dump_periodic (struct ohci_hcd *ohci, char *label)
{
int i, j;
u32 *ed_p;
int printed = 0;
for (i= 0; i < 32; i++) {
j = 5;
ed_p = &(ohci->hcca->int_table [i]);
if (*ed_p == 0)
continue;
printed = 1;
printk (KERN_DEBUG "%s, ohci %s frame %2d:",
label, ohci->hcd.self.bus_name, i);
while (*ed_p != 0 && j--) {
struct ed *ed = dma_to_ed (ohci, le32_to_cpup(ed_p));
printk (" %p/%08x;", ed, ed->hwINFO);
ed_p = &ed->hwNextED;
}
printk ("\n");
}
if (!printed)
printk (KERN_DEBUG "%s, ohci %s, empty periodic schedule\n",
label, ohci->hcd.self.bus_name);
}
static void ohci_dump_intr_mask (char *label, __u32 mask)
{
dbg ("%s: 0x%08x%s%s%s%s%s%s%s%s%s",
@@ -316,8 +285,9 @@ ohci_dump_ed (struct ohci_hcd *ohci, char *label, struct ed *ed, int verbose)
case ED_IN: type = "-IN"; break;
/* else from TDs ... control */
}
dbg (" info %08x MAX=%d%s%s%s EP=%d%s DEV=%d", le32_to_cpu (tmp),
0x0fff & (le32_to_cpu (tmp) >> 16),
dbg (" info %08x MAX=%d%s%s%s%s EP=%d%s DEV=%d", le32_to_cpu (tmp),
0x03ff & (le32_to_cpu (tmp) >> 16),
(tmp & ED_DEQUEUE) ? " DQ" : "",
(tmp & ED_ISO) ? " ISO" : "",
(tmp & ED_SKIP) ? " SKIP" : "",
(tmp & ED_LOWSPEED) ? " LOW" : "",
@@ -344,5 +314,222 @@ ohci_dump_ed (struct ohci_hcd *ohci, char *label, struct ed *ed, int verbose)
}
}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,32)
# define DRIVERFS_DEBUG_FILES
#endif
#endif /* DEBUG */
/*-------------------------------------------------------------------------*/
#ifdef DRIVERFS_DEBUG_FILES
static ssize_t
show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
{
unsigned temp, size = count;
if (!ed)
return 0;
/* print first --> last */
while (ed->ed_prev)
ed = ed->ed_prev;
/* dump a snapshot of the bulk or control schedule */
while (ed) {
u32 info = ed->hwINFO;
u32 scratch = cpu_to_le32p (&ed->hwINFO);
struct list_head *entry;
struct td *td;
temp = snprintf (buf, size,
"ed/%p %cs dev%d ep%d-%s max %d %08x%s%s %s",
ed,
(info & ED_LOWSPEED) ? 'l' : 'f',
scratch & 0x7f,
(scratch >> 7) & 0xf,
(info & ED_IN) ? "in" : "out",
0x03ff & (scratch >> 16),
scratch,
(info & ED_SKIP) ? " s" : "",
(ed->hwHeadP & ED_H) ? " H" : "",
(ed->hwHeadP & ED_C) ? data1 : data0);
size -= temp;
buf += temp;
list_for_each (entry, &ed->td_list) {
u32 cbp, be;
td = list_entry (entry, struct td, td_list);
scratch = cpu_to_le32p (&td->hwINFO);
cbp = le32_to_cpup (&td->hwCBP);
be = le32_to_cpup (&td->hwBE);
temp = snprintf (buf, size,
"\n\ttd %p %s %d cc=%x urb %p (%08x)",
td,
({ char *pid;
switch (scratch & TD_DP) {
case TD_DP_SETUP: pid = "setup"; break;
case TD_DP_IN: pid = "in"; break;
case TD_DP_OUT: pid = "out"; break;
default: pid = "(?)"; break;
} pid;}),
cbp ? (be + 1 - cbp) : 0,
TD_CC_GET (scratch), td->urb, scratch);
size -= temp;
buf += temp;
}
temp = snprintf (buf, size, "\n");
size -= temp;
buf += temp;
ed = ed->ed_next;
}
return count - size;
}
static ssize_t
show_async (struct device *dev, char *buf, size_t count, loff_t off)
{
struct pci_dev *pdev;
struct ohci_hcd *ohci;
size_t temp;
unsigned long flags;
if (off != 0)
return 0;
pdev = container_of (dev, struct pci_dev, dev);
ohci = container_of (pci_get_drvdata (pdev), struct ohci_hcd, hcd);
/* display control and bulk lists together, for simplicity */
spin_lock_irqsave (&ohci->lock, flags);
temp = show_list (ohci, buf, count, ohci->ed_controltail);
count = show_list (ohci, buf + temp, count - temp, ohci->ed_bulktail);
spin_unlock_irqrestore (&ohci->lock, flags);
return temp + count;
}
static DEVICE_ATTR (async, S_IRUGO, show_async, NULL);
#define DBG_SCHED_LIMIT 64
static ssize_t
show_periodic (struct device *dev, char *buf, size_t count, loff_t off)
{
struct pci_dev *pdev;
struct ohci_hcd *ohci;
struct ed **seen, *ed;
unsigned long flags;
unsigned temp, size, seen_count;
char *next;
unsigned i;
if (off != 0)
return 0;
if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC)))
return 0;
seen_count = 0;
pdev = container_of (dev, struct pci_dev, dev);
ohci = container_of (pci_get_drvdata (pdev), struct ohci_hcd, hcd);
next = buf;
size = count;
temp = snprintf (next, size, "size = %d\n", NUM_INTS);
size -= temp;
next += temp;
/* dump a snapshot of the periodic schedule (and load) */
spin_lock_irqsave (&ohci->lock, flags);
for (i = 0; i < NUM_INTS; i++) {
if (!(ed = ohci->periodic [i]))
continue;
temp = snprintf (next, size, "%2d [%3d]:", i, ohci->load [i]);
size -= temp;
next += temp;
do {
temp = snprintf (next, size, " ed%d/%p",
ed->interval, ed);
size -= temp;
next += temp;
for (temp = 0; temp < seen_count; temp++) {
if (seen [temp] == ed)
break;
}
/* show more info the first time around */
if (temp == seen_count) {
u32 info = ed->hwINFO;
u32 scratch = cpu_to_le32p (&ed->hwINFO);
temp = snprintf (next, size,
" (%cs dev%d%s ep%d-%s"
" max %d %08x%s%s)",
(info & ED_LOWSPEED) ? 'l' : 'f',
scratch & 0x7f,
(info & ED_ISO) ? " iso" : "",
(scratch >> 7) & 0xf,
(info & ED_IN) ? "in" : "out",
0x03ff & (scratch >> 16),
scratch,
(info & ED_SKIP) ? " s" : "",
(ed->hwHeadP & ED_H) ? " H" : "");
size -= temp;
next += temp;
// FIXME some TD info too
if (seen_count < DBG_SCHED_LIMIT)
seen [seen_count++] = ed;
ed = ed->ed_next;
} else {
/* we've seen it and what's after */
temp = 0;
ed = 0;
}
} while (ed);
temp = snprintf (next, size, "\n");
size -= temp;
next += temp;
}
spin_unlock_irqrestore (&ohci->lock, flags);
kfree (seen);
return count - size;
}
static DEVICE_ATTR (periodic, S_IRUGO, show_periodic, NULL);
#undef DBG_SCHED_LIMIT
static inline void create_debug_files (struct ohci_hcd *bus)
{
device_create_file (&bus->hcd.pdev->dev, &dev_attr_async);
device_create_file (&bus->hcd.pdev->dev, &dev_attr_periodic);
// registers
dbg ("%s: created debug files", bus->hcd.self.bus_name);
}
static inline void remove_debug_files (struct ohci_hcd *bus)
{
device_remove_file (&bus->hcd.pdev->dev, &dev_attr_async);
device_remove_file (&bus->hcd.pdev->dev, &dev_attr_periodic);
}
#else /* empty stubs for creating those files */
static inline void create_debug_files (struct ohci_hcd *bus) { }
static inline void remove_debug_files (struct ohci_hcd *bus) { }
#endif /* DRIVERFS_DEBUG_FILES */
/*-------------------------------------------------------------------------*/
@@ -17,6 +17,8 @@
*
* History:
*
* 2002/09/03 get rid of ed hashtables, rework periodic scheduling and
* bandwidth accounting; if debugging, show schedules in driverfs
* 2002/07/19 fixes to management of ED and schedule state.
* 2002/06/09 SA-1111 support (Christopher Hoover)
* 2002/06/01 remember frame when HC won't see EDs any more; use that info
@@ -66,7 +68,6 @@
* v1.0 1999/04/27 initial release
*
* This file is licenced under the GPL.
* $Id: ohci-hcd.c,v 1.9 2002/03/27 20:41:57 dbrownell Exp $
*/
#include <linux/config.h>
@@ -107,8 +108,8 @@
* - lots more testing!!
*/
#define DRIVER_VERSION "2002-Jul-19"
#define DRIVER_AUTHOR "Roman Weissgaerber <weissg@vienna.at>, David Brownell"
#define DRIVER_VERSION "2002-Sep-03"
#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell"
#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
/*-------------------------------------------------------------------------*/
@@ -152,7 +153,6 @@ static int ohci_urb_enqueue (
unsigned int pipe = urb->pipe;
int i, size = 0;
unsigned long flags;
int bustime = 0;
int retval = 0;
#ifdef OHCI_VERBOSE_DEBUG
@@ -230,43 +230,32 @@ static int ohci_urb_enqueue (
}
}
// FIXME: much of this switch should be generic, move to hcd code ...
// ... and what's not generic can't really be handled this way.
// need to consider periodicity for both types!
/* allocate and claim bandwidth if needed; ISO
* needs start frame index if it wasn't provided.
*/
switch (usb_pipetype (pipe)) {
case PIPE_ISOCHRONOUS:
if (urb->transfer_flags & USB_ISO_ASAP) {
urb->start_frame = ((ed->state != ED_IDLE)
? (ed->intriso.last_iso + 1)
: (le16_to_cpu (ohci->hcca->frame_no)
+ 10)) & 0xffff;
}
/* FALLTHROUGH */
case PIPE_INTERRUPT:
if (urb->bandwidth == 0) {
bustime = usb_check_bandwidth (urb->dev, urb);
}
if (bustime < 0) {
retval = bustime;
goto fail;
}
usb_claim_bandwidth (urb->dev, urb,
bustime, usb_pipeisoc (urb->pipe));
}
/* schedule the ed if needed */
if (ed->state == ED_IDLE) {
retval = ed_schedule (ohci, ed);
if (retval < 0)
goto fail;
if (ed->type == PIPE_ISOCHRONOUS) {
u16 frame = le16_to_cpu (ohci->hcca->frame_no);
urb->hcpriv = urb_priv;
/* delay a few frames before the first TD */
frame += max_t (u16, 8, ed->interval);
frame &= ~(ed->interval - 1);
frame |= ed->branch;
urb->start_frame = frame;
/* schedule the ed if needed */
if (ed->state == ED_IDLE)
ed_schedule (ohci, ed);
/* yes, only USB_ISO_ASAP is supported, and
* urb->start_frame is never used as input.
*/
}
} else if (ed->type == PIPE_ISOCHRONOUS)
urb->start_frame = ed->last_iso + ed->interval;
/* fill the TDs and link them to the ed; and
* enable that part of the schedule, if needed
* and update count of queued periodic urbs
*/
urb->hcpriv = urb_priv;
td_submit_urb (ohci, urb);
fail:
@@ -537,6 +526,7 @@ static int hc_start (struct ohci_hcd *ohci)
return -ENODEV;
}
create_debug_files (ohci);
return 0;
}
@@ -571,7 +561,8 @@ static void ohci_irq (struct usb_hcd *hcd)
if (ints & OHCI_INTR_UE) {
disable (ohci);
err ("OHCI Unrecoverable Error, %s disabled", hcd->self.bus_name);
err ("OHCI Unrecoverable Error, %s disabled",
hcd->self.bus_name);
// e.g. due to PCI Master/Target Abort
#ifdef DEBUG
@@ -620,6 +611,7 @@ static void ohci_stop (struct usb_hcd *hcd)
if (!ohci->disabled)
hc_reset (ohci);
remove_debug_files (ohci);
ohci_mem_cleanup (ohci);
if (ohci->hcca) {
pci_free_consistent (ohci->hcd.pdev, sizeof *ohci->hcca,
@@ -649,14 +641,13 @@ static int hc_restart (struct ohci_hcd *ohci)
usb_disconnect (&ohci->hcd.self.root_hub);
/* empty the interrupt branches */
for (i = 0; i < NUM_INTS; i++) ohci->ohci_int_load [i] = 0;
for (i = 0; i < NUM_INTS; i++) ohci->load [i] = 0;
for (i = 0; i < NUM_INTS; i++) ohci->hcca->int_table [i] = 0;
/* no EDs to remove */
ohci->ed_rm_list = NULL;
/* empty control and bulk lists */
ohci->ed_isotail = NULL;
ohci->ed_controltail = NULL;
ohci->ed_bulktail = NULL;
@@ -5,7 +5,6 @@
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
*
* This file is licenced under the GPL.
* $Id: ohci-mem.c,v 1.3 2002/03/22 16:04:54 dbrownell Exp $
*/
/*-------------------------------------------------------------------------*/
@@ -52,13 +51,6 @@ dma_to_ed_td (struct hash_list_t * entry, dma_addr_t dma)
return scan->virt;
}
static struct ed *
dma_to_ed (struct ohci_hcd *hc, dma_addr_t ed_dma)
{
return (struct ed *) dma_to_ed_td(&(hc->ed_hash [ED_HASH_FUNC(ed_dma)]),
ed_dma);
}
static struct td *
dma_to_td (struct ohci_hcd *hc, dma_addr_t td_dma)
{
@@ -97,13 +89,6 @@ hash_add_ed_td (
return 1;
}
static inline int
hash_add_ed (struct ohci_hcd *hc, struct ed *ed, int mem_flags)
{
return hash_add_ed_td (&(hc->ed_hash [ED_HASH_FUNC (ed->dma)]),
ed, ed->dma, mem_flags);
}
static inline int
hash_add_td (struct ohci_hcd *hc, struct td *td, int mem_flags)
{
@@ -138,12 +123,6 @@ hash_free_ed_td (struct hash_list_t *entry, void *virt)
}
}
static inline void
hash_free_ed (struct ohci_hcd *hc, struct ed * ed)
{
hash_free_ed_td (&(hc->ed_hash[ED_HASH_FUNC(ed->dma)]), ed);
}
static inline void
hash_free_td (struct ohci_hcd *hc, struct td * td)
{
@@ -223,11 +202,6 @@ ed_alloc (struct ohci_hcd *hc, int mem_flags)
memset (ed, 0, sizeof (*ed));
INIT_LIST_HEAD (&ed->td_list);
ed->dma = dma;
/* hash it for later reverse mapping */
if (!hash_add_ed (hc, ed, mem_flags)) {
pci_pool_free (hc->ed_cache, ed, dma);
return NULL;
}
}
return ed;
}
@@ -235,7 +209,6 @@ ed_alloc (struct ohci_hcd *hc, int mem_flags)
static void
ed_free (struct ohci_hcd *hc, struct ed *ed)
{
hash_free_ed (hc, ed);
pci_pool_free (hc->ed_cache, ed, ed->dma);
}
@@ -5,7 +5,6 @@
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
*
* This file is licenced under the GPL.
* $Id: ohci-q.c,v 1.8 2002/03/27 20:57:01 dbrownell Exp $
*/
static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
@@ -52,6 +51,15 @@ static void finish_urb (struct ohci_hcd *ohci, struct urb *urb)
urb->status = 0;
spin_unlock_irqrestore (&urb->lock, flags);
switch (usb_pipetype (urb->pipe)) {
case PIPE_ISOCHRONOUS:
ohci->hcd.self.bandwidth_isoc_reqs--;
break;
case PIPE_INTERRUPT:
ohci->hcd.self.bandwidth_int_reqs--;
break;
}
#ifdef OHCI_VERBOSE_DEBUG
urb_print (urb, "RET", usb_pipeout (urb->pipe));
#endif
@@ -111,67 +119,111 @@ static inline void intr_resub (struct ohci_hcd *hc, struct urb *urb)
* ED handling functions
*-------------------------------------------------------------------------*/
/* search for the right branch to insert an interrupt ed into the int tree
* do some load balancing;
* returns the branch
* FIXME allow for failure, when there's no bandwidth left;
* and consider iso loads too
/* search for the right schedule branch to use for a periodic ed.
* does some load balancing; returns the branch, or negative errno.
*/
static int ep_int_balance (struct ohci_hcd *ohci, int interval, int load)
static int balance (struct ohci_hcd *ohci, int interval, int load)
{
int i, branch = 0;
/* search for the least loaded interrupt endpoint branch */
for (i = 0; i < NUM_INTS ; i++)
if (ohci->ohci_int_load [branch] > ohci->ohci_int_load [i])
branch = i;
int i, branch = -ENOSPC;
branch = branch % interval;
for (i = branch; i < NUM_INTS; i += interval)
ohci->ohci_int_load [i] += load;
/* iso periods can be huge; iso tds specify frame numbers */
if (interval > NUM_INTS)
interval = NUM_INTS;
/* search for the least loaded schedule branch of that period
* that has enough bandwidth left unreserved.
*/
for (i = 0; i < interval ; i++) {
if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
#ifdef CONFIG_USB_BANDWIDTH
int j;
/* usb 1.1 says 90% of one frame */
for (j = i; j < NUM_INTS; j += interval) {
if ((ohci->load [j] + load) > 900)
break;
}
if (j < NUM_INTS)
continue;
#endif
branch = i;
}
}
return branch;
}
/*-------------------------------------------------------------------------*/
/* the int tree is a binary tree
* in order to process it sequentially the indexes of the branches have
* to be mapped the mapping reverses the bits of a word of num_bits length
/* both iso and interrupt requests have periods; this routine puts them
* into the schedule tree in the appropriate place. most iso devices use
* 1msec periods, but that's not required.
*/
static int ep_rev (int num_bits, int word)
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
int i, wout = 0;
unsigned i;
for (i = 0; i < num_bits; i++)
wout |= (( (word >> i) & 1) << (num_bits - i - 1));
return wout;
}
dbg ("%s: link %sed %p branch %d [%dus.], interval %d",
ohci->hcd.self.bus_name,
(ed->hwINFO & ED_ISO) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
/*-------------------------------------------------------------------------*/
for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
struct ed **prev = &ohci->periodic [i];
u32 *prev_p = &ohci->hcca->int_table [i];
struct ed *here = *prev;
/* sorting each branch by period (slow before fast)
* lets us share the faster parts of the tree.
* (plus maybe: put interrupt eds before iso)
*/
while (here && ed != here) {
if (ed->interval > here->interval)
break;
prev = &here->ed_next;
prev_p = &here->hwNextED;
here = *prev;
}
if (ed != here) {
ed->ed_next = here;
if (here)
ed->hwNextED = *prev_p;
wmb ();
*prev = ed;
*prev_p = cpu_to_le32p (&ed->dma);
}
ohci->load [i] += ed->load;
}
ohci->hcd.self.bandwidth_allocated += ed->load / ed->interval;
}
/* link an ed into one of the HC chains */
static void ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
int int_branch, i;
int inter, interval, load;
__u32 *ed_p;
int branch;
ed->state = ED_OPER;
ed->ed_prev = 0;
ed->ed_next = 0;
ed->hwNextED = 0;
wmb ();
/* we care about rm_list when setting CLE/BLE in case the HC was at
* work on some TD when CLE/BLE was turned off, and isn't quiesced
* yet. finish_unlinks() restarts as needed, some upcoming INTR_SF.
*
* control and bulk EDs are doubly linked (ed_next, ed_prev), but
* periodic ones are singly linked (ed_next). that's because the
* periodic schedule encodes a tree like figure 3-5 in the ohci
* spec: each qh can have several "previous" nodes, and the tree
* doesn't have unused/idle descriptors.
*/
switch (ed->type) {
case PIPE_CONTROL:
if (ohci->ed_controltail == NULL) {
writel (ed->dma, &ohci->regs->ed_controlhead);
} else {
ohci->ed_controltail->ed_next = ed;
ohci->ed_controltail->hwNextED = cpu_to_le32 (ed->dma);
}
ed->ed_prev = ohci->ed_controltail;
@@ -187,6 +239,7 @@ static void ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
if (ohci->ed_bulktail == NULL) {
writel (ed->dma, &ohci->regs->ed_bulkhead);
} else {
ohci->ed_bulktail->ed_next = ed;
ohci->ed_bulktail->hwNextED = cpu_to_le32 (ed->dma);
}
ed->ed_prev = ohci->ed_bulktail;
@@ -198,74 +251,55 @@ static void ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
ohci->ed_bulktail = ed;
break;
case PIPE_INTERRUPT:
load = ed->intriso.intr_info.int_load;
interval = ed->interval;
int_branch = ep_int_balance (ohci, interval, load);
ed->intriso.intr_info.int_branch = int_branch;
for (i = 0; i < ep_rev (6, interval); i += inter) {
inter = 1;
for (ed_p = & (ohci->hcca->int_table [ep_rev (5, i) + int_branch]);
(*ed_p != 0) && ((dma_to_ed (ohci, le32_to_cpup (ed_p)))->interval >= interval);
ed_p = & ((dma_to_ed (ohci, le32_to_cpup (ed_p)))->hwNextED))
inter = ep_rev (6, (dma_to_ed (ohci, le32_to_cpup (ed_p)))->interval);
ed->hwNextED = *ed_p;
*ed_p = cpu_to_le32 (ed->dma);
// case PIPE_INTERRUPT:
// case PIPE_ISOCHRONOUS:
default:
branch = balance (ohci, ed->interval, ed->load);
if (branch < 0) {
dbg ("%s: ERR %d, interval %d msecs, load %d",
ohci->hcd.self.bus_name,
branch, ed->interval, ed->load);
// FIXME if there are TDs queued, fail them!
return branch;
}
wmb ();
#ifdef OHCI_VERBOSE_DEBUG
ohci_dump_periodic (ohci, "LINK_INT");
#endif
break;
case PIPE_ISOCHRONOUS:
ed->ed_prev = ohci->ed_isotail;
if (ohci->ed_isotail != NULL) {
ohci->ed_isotail->hwNextED = cpu_to_le32 (ed->dma);
} else {
for ( i = 0; i < NUM_INTS; i += inter) {
inter = 1;
for (ed_p = & (ohci->hcca->int_table [ep_rev (5, i)]);
*ed_p != 0;
ed_p = & ((dma_to_ed (ohci, le32_to_cpup (ed_p)))->hwNextED))
inter = ep_rev (6, (dma_to_ed (ohci, le32_to_cpup (ed_p)))->interval);
*ed_p = cpu_to_le32 (ed->dma);
}
}
wmb ();
ohci->ed_isotail = ed;
#ifdef OHCI_VERBOSE_DEBUG
ohci_dump_periodic (ohci, "LINK_ISO");
#endif
break;
ed->branch = branch;
periodic_link (ohci, ed);
}
/* the HC may not see the schedule updates yet, but if it does
* then they'll be properly ordered.
*/
return 0;
}
/*-------------------------------------------------------------------------*/
/* scan the periodic table to find and unlink this ED */
static void periodic_unlink (
struct ohci_hcd *ohci,
struct ed *ed,
unsigned index,
unsigned period
) {
for (; index < NUM_INTS; index += period) {
__u32 *ed_p = &ohci->hcca->int_table [index];
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
int i;
while (*ed_p != 0) {
if ((dma_to_ed (ohci, le32_to_cpup (ed_p))) == ed) {
*ed_p = ed->hwNextED;
break;
}
ed_p = & ((dma_to_ed (ohci, le32_to_cpup (ed_p)))->hwNextED);
for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
struct ed *temp;
struct ed **prev = &ohci->periodic [i];
u32 *prev_p = &ohci->hcca->int_table [i];
while (*prev && (temp = *prev) != ed) {
prev_p = &temp->hwNextED;
prev = &temp->ed_next;
}
if (*prev) {
*prev_p = ed->hwNextED;
*prev = ed->ed_next;
}
ohci->load [i] -= ed->load;
}
ohci->hcd.self.bandwidth_allocated -= ed->load / ed->interval;
dbg ("%s: unlink %sed %p branch %d [%dus.], interval %d",
ohci->hcd.self.bus_name,
(ed->hwINFO & ED_ISO) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
}
/* unlink an ed from one of the HC chains.
@@ -275,8 +309,6 @@ static void periodic_unlink (
*/
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
int i;
ed->hwINFO |= ED_SKIP;
switch (ed->type) {
@@ -289,13 +321,15 @@ static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
writel (le32_to_cpup (&ed->hwNextED),
&ohci->regs->ed_controlhead);
} else {
ed->ed_prev->ed_next = ed->ed_next;
ed->ed_prev->hwNextED = ed->hwNextED;
}
if (ohci->ed_controltail == ed) {
ohci->ed_controltail = ed->ed_prev;
if (ohci->ed_controltail)
ohci->ed_controltail->ed_next = 0;
} else {
(dma_to_ed (ohci, le32_to_cpup (&ed->hwNextED)))
->ed_prev = ed->ed_prev;
ed->ed_next->ed_prev = ed->ed_prev;
}
break;
@@ -308,50 +342,33 @@ static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
writel (le32_to_cpup (&ed->hwNextED),
&ohci->regs->ed_bulkhead);
} else {
ed->ed_prev->ed_next = ed->ed_next;
ed->ed_prev->hwNextED = ed->hwNextED;
}
if (ohci->ed_bulktail == ed) {
ohci->ed_bulktail = ed->ed_prev;
if (ohci->ed_bulktail)
ohci->ed_bulktail->ed_next = 0;
} else {
(dma_to_ed (ohci, le32_to_cpup (&ed->hwNextED)))
->ed_prev = ed->ed_prev;
ed->ed_next->ed_prev = ed->ed_prev;
}
break;
case PIPE_INTERRUPT:
periodic_unlink (ohci, ed, ed->intriso.intr_info.int_branch, ed->interval);
for (i = ed->intriso.intr_info.int_branch; i < NUM_INTS; i += ed->interval)
ohci->ohci_int_load [i] -= ed->intriso.intr_info.int_load;
#ifdef OHCI_VERBOSE_DEBUG
ohci_dump_periodic (ohci, "UNLINK_INT");
#endif
break;
case PIPE_ISOCHRONOUS:
if (ohci->ed_isotail == ed)
ohci->ed_isotail = ed->ed_prev;
if (ed->hwNextED != 0)
(dma_to_ed (ohci, le32_to_cpup (&ed->hwNextED)))
->ed_prev = ed->ed_prev;
if (ed->ed_prev != NULL)
ed->ed_prev->hwNextED = ed->hwNextED;
else
periodic_unlink (ohci, ed, 0, 1);
#ifdef OHCI_VERBOSE_DEBUG
ohci_dump_periodic (ohci, "UNLINK_ISO");
#endif
// case PIPE_INTERRUPT:
// case PIPE_ISOCHRONOUS:
default:
periodic_unlink (ohci, ed);
break;
}
/* FIXME Except for a couple of exceptionally clean unlink cases
/* NOTE: Except for a couple of exceptionally clean unlink cases
* (like unlinking the only c/b ED, with no TDs) HCs may still be
* caching this (till SOF).
*
* To avoid racing with the hardware, this needs to use ED_UNLINK
* and delay til next INTR_SF. Merge with start_urb_unlink().
* caching this operational ED (or its address). Safe unlinking
* involves not marking it ED_IDLE till INTR_SF; we always do that
* if td_list isn't empty. Otherwise the race is small; but ...
*/
ed->state = ED_IDLE;
if (ed->state == ED_OPER)
ed->state = ED_IDLE;
}
@@ -369,7 +386,6 @@ static struct ed *ed_get (
) {
int is_out = !usb_pipein (pipe);
int type = usb_pipetype (pipe);
int bus_msecs = 0;
struct hcd_dev *dev = (struct hcd_dev *) udev->hcpriv;
struct ed *ed;
unsigned ep;
@@ -378,9 +394,6 @@ static struct ed *ed_get (
ep = usb_pipeendpoint (pipe) << 1;
if (type != PIPE_CONTROL && is_out)
ep |= 1;
if (type == PIPE_INTERRUPT)
bus_msecs = usb_calc_bus_time (udev->speed, !is_out, 0,
usb_maxpacket (udev, pipe, is_out)) / 1000;
spin_lock_irqsave (&ohci->lock, flags);
@@ -422,23 +435,25 @@ static struct ed *ed_get (
info = cpu_to_le32 (info);
if (udev->speed == USB_SPEED_LOW)
info |= ED_LOWSPEED;
/* control transfers store pids in tds */
/* only control transfers store pids in tds */
if (type != PIPE_CONTROL) {
info |= is_out ? ED_OUT : ED_IN;
if (type == PIPE_ISOCHRONOUS)
info |= ED_ISO;
if (type == PIPE_INTERRUPT) {
ed->intriso.intr_info.int_load = bus_msecs;
if (interval > 32)
if (type != PIPE_BULK) {
/* periodic transfers... */
if (type == PIPE_ISOCHRONOUS)
info |= ED_ISO;
else if (interval > 32) /* iso can be bigger */
interval = 32;
ed->interval = interval;
ed->load = usb_calc_bus_time (
udev->speed, !is_out,
type == PIPE_ISOCHRONOUS,
usb_maxpacket (udev, pipe, is_out))
/ 1000;
}
}
ed->hwINFO = info;
/* value ignored except on periodic EDs, where
* we know it's already a power of 2
*/
ed->interval = interval;
#ifdef DEBUG
/*
* There are two other cases we ought to change hwINFO, both during
@@ -473,8 +488,9 @@ static struct ed *ed_get (
*/
static void start_urb_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
ed_deschedule (ohci, ed);
ed->hwINFO |= ED_DEQUEUE;
ed->state = ED_UNLINK;
ed_deschedule (ohci, ed);
/* SF interrupt might get delayed; record the frame counter value that
* indicates when the HC isn't looking at it, so concurrent unlinks
@@ -483,7 +499,9 @@ static void start_urb_unlink (struct ohci_hcd *ohci, struct ed *ed)
*/
ed->tick = le16_to_cpu (ohci->hcca->frame_no) + 1;
/* rm_list is just singly linked, for simplicity */
ed->ed_next = ohci->ed_rm_list;
ed->ed_prev = 0;
ohci->ed_rm_list = ed;
/* enable SOF interrupt */
@@ -529,7 +547,6 @@ td_fill (unsigned int info,
/* use this td as the next dummy */
td_pt = urb_priv->td [index];
td_pt->hwNextTD = 0;
/* fill the old dummy TD */
td = urb_priv->td [index] = urb_priv->ed->dummy;
@@ -547,7 +564,7 @@ td_fill (unsigned int info,
if (is_iso) {
td->hwCBP = cpu_to_le32 (data & 0xFFFFF000);
td->hwPSW [0] = cpu_to_le16 ((data & 0x0FFF) | 0xE000);
td->ed->intriso.last_iso = info & 0xffff;
td->ed->last_iso = info & 0xffff;
} else {
td->hwCBP = cpu_to_le32 (data);
}
@@ -608,9 +625,11 @@ static void td_submit_urb (
/* Bulk and interrupt are identical except for where in the schedule
* their EDs live.
*/
// case PIPE_BULK:
// case PIPE_INTERRUPT:
default:
case PIPE_INTERRUPT:
/* ... and periodic urbs have extra accounting */
ohci->hcd.self.bandwidth_int_reqs++;
/* FALLTHROUGH */
case PIPE_BULK:
info = is_out
? TD_T_TOGGLE | TD_CC | TD_DP_OUT
: TD_T_TOGGLE | TD_CC | TD_DP_IN;
@@ -676,6 +695,7 @@ static void td_submit_urb (
data + urb->iso_frame_desc [cnt].offset,
urb->iso_frame_desc [cnt].length, urb, cnt);
}
ohci->hcd.self.bandwidth_isoc_reqs++;
break;
}
if (urb_priv->length != cnt)
@@ -802,6 +822,7 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
if (td_list->ed->hwHeadP & ED_H) {
if (urb_priv && ((td_list->index + 1)
< urb_priv->length)) {
#ifdef DEBUG
struct urb *urb = td_list->urb;
/* help for troubleshooting: */
@@ -817,6 +838,7 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
1 + td_list->index,
urb_priv->length,
cc, cc_to_error [cc]);
#endif
td_list->ed->hwHeadP =
(urb_priv->td [urb_priv->length - 1]->hwNextTD
& __constant_cpu_to_le32 (TD_MASK))
@@ -872,8 +894,7 @@ static void finish_unlinks (struct ohci_hcd *ohci, u16 tick)
* we call a completion since it might have unlinked
* another (earlier) urb
*
* FIXME use td_list to scan, not ed hashtables.
* completely abolish ed hashtables!
* FIXME use td_list to scan, not td hashtables.
*/
rescan_this:
completed = 0;
@@ -894,8 +915,7 @@ static void finish_unlinks (struct ohci_hcd *ohci, u16 tick)
td_done (urb, td);
urb_priv->td_cnt++;
*td_p = td->hwNextTD | (*td_p
& __constant_cpu_to_le32 (0x3));
*td_p = td->hwNextTD | (*td_p & ~TD_MASK);
/* URB is done; clean up */
if (urb_priv->td_cnt == urb_priv->length) {
@@ -5,7 +5,6 @@
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
*
* This file is licenced under the GPL.
* $Id: ohci.h,v 1.6 2002/03/22 16:04:54 dbrownell Exp $
*/
/*
@@ -18,13 +17,16 @@
struct ed {
/* first fields are hardware-specified, le32 */
__u32 hwINFO; /* endpoint config bitmap */
/* info bits defined by hcd */
#define ED_DEQUEUE __constant_cpu_to_le32(1 << 27)
/* info bits defined by the hardware */
#define ED_ISO __constant_cpu_to_le32(1 << 15)
#define ED_SKIP __constant_cpu_to_le32(1 << 14)
#define ED_LOWSPEED __constant_cpu_to_le32(1 << 13)
#define ED_OUT __constant_cpu_to_le32(0x01 << 11)
#define ED_IN __constant_cpu_to_le32(0x02 << 11)
__u32 hwTailP; /* tail of TD list */
__u32 hwHeadP; /* head of TD list */
__u32 hwHeadP; /* head of TD list (hc r/w) */
#define ED_C __constant_cpu_to_le32(0x02) /* toggle carry */
#define ED_H __constant_cpu_to_le32(0x01) /* halted */
__u32 hwNextED; /* next ED in list */
@@ -48,14 +50,12 @@ struct ed {
#define ED_OPER 0x02 /* IS linked to hc */
u8 type; /* PIPE_{BULK,...} */
u16 interval; /* interrupt, isochronous */
union {
struct intr_info { /* interrupt */
u8 int_branch;
u8 int_load;
} intr_info;
u16 last_iso; /* isochronous */
} intriso;
/* periodic scheduling params (for intr and iso) */
u8 branch;
u16 interval;
u16 load;
u16 last_iso; /* iso only */
/* HC may see EDs on rm_list until next frame (frame_no == tick) */
u16 tick;
@@ -335,10 +335,8 @@ struct hash_list_t {
};
#define TD_HASH_SIZE 64 /* power'o'two */
#define ED_HASH_SIZE 64 /* power'o'two */
#define TD_HASH_FUNC(td_dma) ((td_dma ^ (td_dma >> 5)) % TD_HASH_SIZE)
#define ED_HASH_FUNC(ed_dma) ((ed_dma ^ (ed_dma >> 5)) % ED_HASH_SIZE)
/*
@@ -373,7 +371,7 @@ struct ohci_hcd {
struct ed *ed_bulktail; /* last in bulk list */
struct ed *ed_controltail; /* last in ctrl list */
struct ed *ed_isotail; /* last in iso list */
struct ed *periodic [NUM_INTS]; /* shadow int_table */
/*
* memory management for queue data structures
@@ -381,14 +379,13 @@
struct pci_pool *td_cache;
struct pci_pool *ed_cache;
struct hash_list_t td_hash [TD_HASH_SIZE];
struct hash_list_t ed_hash [ED_HASH_SIZE];
/*
* driver state
*/
int disabled; /* e.g. got a UE, we're hung */
int sleeping;
int ohci_int_load [NUM_INTS];
int load [NUM_INTS];
u32 hc_control; /* copy of hc control reg */
unsigned long flags; /* for HC bugs */