Commit 0ed8fee1 authored by Alan Stern's avatar Alan Stern Committed by Greg Kroah-Hartman

[PATCH] UHCI: remove main list of URBs

As part of reorienting uhci-hcd away from URBs and toward endpoint
queues, this patch (as625) eliminates the driver's main list of URBs.
The list was used mainly in checking for URB completions; now the driver
goes through the list of active endpoints and checks the members of the
queues.

As a side effect, I had to remove the code that looks for FSBR timeouts.
For now, FSBR will remain on so long as any URBs on a full-speed control
or bulk queue request it, even if the queue isn't advancing.  A later
patch can add more intelligent handling.  This isn't a huge drawback;
it's pretty rare for an URB to get stuck for more than a fraction of a
second.  (And it will help the people trying to use those insane HP USB
devices.)
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent af0bb599
...@@ -114,7 +114,6 @@ static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space) ...@@ -114,7 +114,6 @@ static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space)
} }
out += sprintf(out, "%s", (urbp->fsbr ? " FSBR" : "")); out += sprintf(out, "%s", (urbp->fsbr ? " FSBR" : ""));
out += sprintf(out, "%s", (urbp->fsbr_timeout ? " FSBR_TO" : ""));
if (urbp->urb->status != -EINPROGRESS) if (urbp->urb->status != -EINPROGRESS)
out += sprintf(out, " Status=%d", urbp->urb->status); out += sprintf(out, " Status=%d", urbp->urb->status);
......
...@@ -491,8 +491,6 @@ static int uhci_start(struct usb_hcd *hcd) ...@@ -491,8 +491,6 @@ static int uhci_start(struct usb_hcd *hcd)
spin_lock_init(&uhci->lock); spin_lock_init(&uhci->lock);
INIT_LIST_HEAD(&uhci->td_remove_list); INIT_LIST_HEAD(&uhci->td_remove_list);
INIT_LIST_HEAD(&uhci->urb_list);
INIT_LIST_HEAD(&uhci->complete_list);
INIT_LIST_HEAD(&uhci->idle_qh_list); INIT_LIST_HEAD(&uhci->idle_qh_list);
init_waitqueue_head(&uhci->waitqh); init_waitqueue_head(&uhci->waitqh);
......
...@@ -132,6 +132,10 @@ struct uhci_qh { ...@@ -132,6 +132,10 @@ struct uhci_qh {
unsigned int unlink_frame; /* When the QH was unlinked */ unsigned int unlink_frame; /* When the QH was unlinked */
int state; /* QH_STATE_xxx; see above */ int state; /* QH_STATE_xxx; see above */
unsigned int initial_toggle:1; /* Endpoint's current toggle value */
unsigned int needs_fixup:1; /* Must fix the TD toggle values */
unsigned int is_stopped:1; /* Queue was stopped by an error */
} __attribute__((aligned(16))); } __attribute__((aligned(16)));
/* /*
...@@ -384,6 +388,7 @@ struct uhci_hcd { ...@@ -384,6 +388,7 @@ struct uhci_hcd {
struct uhci_td *term_td; /* Terminating TD, see UHCI bug */ struct uhci_td *term_td; /* Terminating TD, see UHCI bug */
struct uhci_qh *skelqh[UHCI_NUM_SKELQH]; /* Skeleton QHs */ struct uhci_qh *skelqh[UHCI_NUM_SKELQH]; /* Skeleton QHs */
struct uhci_qh *next_qh; /* Next QH to scan */
spinlock_t lock; spinlock_t lock;
...@@ -413,16 +418,10 @@ struct uhci_hcd { ...@@ -413,16 +418,10 @@ struct uhci_hcd {
unsigned long resuming_ports; unsigned long resuming_ports;
unsigned long ports_timeout; /* Time to stop signalling */ unsigned long ports_timeout; /* Time to stop signalling */
/* Main list of URBs currently controlled by this HC */
struct list_head urb_list;
/* List of TDs that are done, but waiting to be freed (race) */ /* List of TDs that are done, but waiting to be freed (race) */
struct list_head td_remove_list; struct list_head td_remove_list;
unsigned int td_remove_age; /* Age in frames */ unsigned int td_remove_age; /* Age in frames */
/* List of URBs awaiting completion callback */
struct list_head complete_list;
struct list_head idle_qh_list; /* Where the idle QHs live */ struct list_head idle_qh_list; /* Where the idle QHs live */
int rh_numports; /* Number of root-hub ports */ int rh_numports; /* Number of root-hub ports */
...@@ -448,7 +447,6 @@ static inline struct usb_hcd *uhci_to_hcd(struct uhci_hcd *uhci) ...@@ -448,7 +447,6 @@ static inline struct usb_hcd *uhci_to_hcd(struct uhci_hcd *uhci)
* Private per-URB data * Private per-URB data
*/ */
struct urb_priv { struct urb_priv {
struct list_head urb_list;
struct list_head node; /* Node in the QH's urbp list */ struct list_head node; /* Node in the QH's urbp list */
struct urb *urb; struct urb *urb;
...@@ -456,10 +454,7 @@ struct urb_priv { ...@@ -456,10 +454,7 @@ struct urb_priv {
struct uhci_qh *qh; /* QH for this URB */ struct uhci_qh *qh; /* QH for this URB */
struct list_head td_list; struct list_head td_list;
unsigned long fsbrtime; /* In jiffies */
unsigned fsbr : 1; /* URB turned on FSBR */ unsigned fsbr : 1; /* URB turned on FSBR */
unsigned fsbr_timeout : 1; /* URB timed out on FSBR */
unsigned short_transfer : 1; /* URB got a short transfer, no unsigned short_transfer : 1; /* URB got a short transfer, no
* need to rescan */ * need to rescan */
}; };
......
...@@ -151,53 +151,6 @@ static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb) ...@@ -151,53 +151,6 @@ static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
wmb(); wmb();
} }
/*
* Remove an URB's TDs from the hardware schedule
*/
static void uhci_remove_tds_from_schedule(struct uhci_hcd *uhci,
struct urb *urb, int status)
{
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
/* Isochronous TDs get unlinked directly from the frame list */
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
uhci_unlink_isochronous_tds(uhci, urb);
return;
}
/* If the URB isn't first on its queue, adjust the link pointer
* of the last TD in the previous URB. */
if (urbp->node.prev != &urbp->qh->queue) {
struct urb_priv *purbp;
struct uhci_td *ptd, *ltd;
if (status == -EINPROGRESS)
status = 0;
purbp = list_entry(urbp->node.prev, struct urb_priv, node);
ptd = list_entry(purbp->td_list.prev, struct uhci_td,
list);
ltd = list_entry(urbp->td_list.prev, struct uhci_td,
list);
ptd->link = ltd->link;
}
/* If the URB completed with an error, then the QH element certainly
* points to one of the URB's TDs. If it completed normally then
* the QH element has certainly moved on to the next URB. And if
* the URB is still in progress then it must have been dequeued.
* The QH element either hasn't reached it yet or is somewhere in
* the middle. If the URB wasn't first we can assume that it
* hasn't started yet (see above): Otherwise all the preceding URBs
* would have completed and been removed from the queue, so this one
* _would_ be first.
*
* If the QH element is inside this URB, clear it. It will be
* set properly when the QH is activated.
*/
if (status < 0)
urbp->qh->element = UHCI_PTR_TERM;
}
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
struct usb_device *udev, struct usb_host_endpoint *hep) struct usb_device *udev, struct usb_host_endpoint *hep)
{ {
...@@ -250,6 +203,90 @@ static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) ...@@ -250,6 +203,90 @@ static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
dma_pool_free(uhci->qh_pool, qh, qh->dma_handle); dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
} }
/*
* When the currently executing URB is dequeued, save its current toggle value
*/
static void uhci_save_toggle(struct uhci_qh *qh, struct urb *urb)
{
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
struct uhci_td *td;
/* If the QH element pointer is UHCI_PTR_TERM then the currently
* executing URB has already been unlinked, so this one isn't it. */
if (qh_element(qh) == UHCI_PTR_TERM ||
qh->queue.next != &urbp->node)
return;
qh->element = UHCI_PTR_TERM;
/* Only bulk and interrupt pipes have to worry about toggles */
if (!(usb_pipetype(urb->pipe) == PIPE_BULK ||
usb_pipetype(urb->pipe) == PIPE_INTERRUPT))
return;
/* Find the first active TD; that's the device's toggle state */
list_for_each_entry(td, &urbp->td_list, list) {
if (td_status(td) & TD_CTRL_ACTIVE) {
qh->needs_fixup = 1;
qh->initial_toggle = uhci_toggle(td_token(td));
return;
}
}
WARN_ON(1);
}
/*
* Fix up the data toggles for URBs in a queue, when one of them
* terminates early (short transfer, error, or dequeued).
*/
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
struct urb_priv *urbp = NULL;
struct uhci_td *td;
unsigned int toggle = qh->initial_toggle;
unsigned int pipe;
/* Fixups for a short transfer start with the second URB in the
* queue (the short URB is the first). */
if (skip_first)
urbp = list_entry(qh->queue.next, struct urb_priv, node);
/* When starting with the first URB, if the QH element pointer is
* still valid then we know the URB's toggles are okay. */
else if (qh_element(qh) != UHCI_PTR_TERM)
toggle = 2;
/* Fix up the toggle for the URBs in the queue. Normally this
* loop won't run more than once: When an error or short transfer
* occurs, the queue usually gets emptied. */
list_prepare_entry(urbp, &qh->queue, node);
list_for_each_entry_continue(urbp, &qh->queue, node) {
/* If the first TD has the right toggle value, we don't
* need to change any toggles in this URB */
td = list_entry(urbp->td_list.next, struct uhci_td, list);
if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
td = list_entry(urbp->td_list.next, struct uhci_td,
list);
toggle = uhci_toggle(td_token(td)) ^ 1;
/* Otherwise all the toggles in the URB have to be switched */
} else {
list_for_each_entry(td, &urbp->td_list, list) {
td->token ^= __constant_cpu_to_le32(
TD_TOKEN_TOGGLE);
toggle ^= 1;
}
}
}
wmb();
pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
usb_pipeout(pipe), toggle);
qh->needs_fixup = 0;
}
/* /*
* Put a QH on the schedule in both hardware and software * Put a QH on the schedule in both hardware and software
*/ */
...@@ -276,6 +313,9 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) ...@@ -276,6 +313,9 @@ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
/* Move the QH from its old list to the end of the appropriate /* Move the QH from its old list to the end of the appropriate
* skeleton's list */ * skeleton's list */
if (qh == uhci->next_qh)
uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
node);
list_move_tail(&qh->node, &qh->skel->node); list_move_tail(&qh->node, &qh->skel->node);
/* Link it into the schedule */ /* Link it into the schedule */
...@@ -310,6 +350,9 @@ static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) ...@@ -310,6 +350,9 @@ static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
uhci_set_next_interrupt(uhci); uhci_set_next_interrupt(uhci);
/* Move the QH from its old list to the end of the unlinking list */ /* Move the QH from its old list to the end of the unlinking list */
if (qh == uhci->next_qh)
uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
node);
list_move_tail(&qh->node, &uhci->skel_unlink_qh->node); list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
} }
...@@ -323,6 +366,9 @@ static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh) ...@@ -323,6 +366,9 @@ static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{ {
WARN_ON(qh->state == QH_STATE_ACTIVE); WARN_ON(qh->state == QH_STATE_ACTIVE);
if (qh == uhci->next_qh)
uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
node);
list_move(&qh->node, &uhci->idle_qh_list); list_move(&qh->node, &uhci->idle_qh_list);
qh->state = QH_STATE_IDLE; qh->state = QH_STATE_IDLE;
...@@ -344,11 +390,9 @@ static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, ...@@ -344,11 +390,9 @@ static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
urbp->urb = urb; urbp->urb = urb;
urb->hcpriv = urbp; urb->hcpriv = urbp;
urbp->fsbrtime = jiffies;
INIT_LIST_HEAD(&urbp->node); INIT_LIST_HEAD(&urbp->node);
INIT_LIST_HEAD(&urbp->td_list); INIT_LIST_HEAD(&urbp->td_list);
INIT_LIST_HEAD(&urbp->urb_list);
return urbp; return urbp;
} }
...@@ -373,9 +417,6 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci, ...@@ -373,9 +417,6 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci,
{ {
struct uhci_td *td, *tmp; struct uhci_td *td, *tmp;
if (!list_empty(&urbp->urb_list))
dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list!\n",
urbp->urb);
if (!list_empty(&urbp->node)) if (!list_empty(&urbp->node))
dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n", dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
urbp->urb); urbp->urb);
...@@ -452,71 +493,6 @@ static int uhci_map_status(int status, int dir_out) ...@@ -452,71 +493,6 @@ static int uhci_map_status(int status, int dir_out)
return 0; return 0;
} }
/*
* Fix up the data toggles for URBs in a queue, when one of them
* terminates early (short transfer, error, or dequeued).
*/
static void uhci_fixup_toggles(struct urb *urb)
{
struct list_head *head;
struct uhci_td *td;
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
int prevactive = 0;
unsigned int toggle = 0;
struct urb_priv *turbp, *list_end;
/*
* We need to find out what the last successful toggle was so
* we can update the data toggles for the following transfers.
*
* There are 2 ways the last successful completed TD is found:
*
* 1) The TD is NOT active and the actual length < expected length
* 2) The TD is NOT active and it's the last TD in the chain
*
* and a third way the first uncompleted TD is found:
*
* 3) The TD is active and the previous TD is NOT active
*/
head = &urbp->td_list;
list_for_each_entry(td, head, list) {
unsigned int ctrlstat = td_status(td);
if (!(ctrlstat & TD_CTRL_ACTIVE) &&
(uhci_actual_length(ctrlstat) <
uhci_expected_length(td_token(td)) ||
td->list.next == head))
toggle = uhci_toggle(td_token(td)) ^ 1;
else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
toggle = uhci_toggle(td_token(td));
prevactive = ctrlstat & TD_CTRL_ACTIVE;
}
/*
* Fix up the toggle for the following URBs in the queue.
*
* We can stop as soon as we find an URB with toggles set correctly,
* because then all the following URBs will be correct also.
*/
list_end = list_entry(&urbp->qh->queue, struct urb_priv, node);
turbp = urbp;
while ((turbp = list_entry(turbp->node.next, struct urb_priv, node))
!= list_end) {
td = list_entry(turbp->td_list.next, struct uhci_td, list);
if (uhci_toggle(td_token(td)) == toggle)
return;
list_for_each_entry(td, &turbp->td_list, list) {
td->token ^= __constant_cpu_to_le32(TD_TOKEN_TOGGLE);
toggle ^= 1;
}
}
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), toggle);
}
/* /*
* Control transfers * Control transfers
*/ */
...@@ -765,6 +741,9 @@ static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb) ...@@ -765,6 +741,9 @@ static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
} }
} }
/* Note that the queue has stopped */
urbp->qh->element = UHCI_PTR_TERM;
urbp->qh->is_stopped = 1;
return ret; return ret;
} }
...@@ -927,7 +906,10 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) ...@@ -927,7 +906,10 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
*/ */
if (!urbp->short_transfer) { if (!urbp->short_transfer) {
urbp->short_transfer = 1; urbp->short_transfer = 1;
uhci_fixup_toggles(urb); urbp->qh->initial_toggle =
uhci_toggle(td_token(td)) ^ 1;
uhci_fixup_toggles(urbp->qh, 1);
td = list_entry(urbp->td_list.prev, td = list_entry(urbp->td_list.prev,
struct uhci_td, list); struct uhci_td, list);
urbp->qh->element = td->link; urbp->qh->element = td->link;
...@@ -962,6 +944,13 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) ...@@ -962,6 +944,13 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
} }
} }
#endif #endif
/* Note that the queue has stopped and save the next toggle value */
urbp->qh->element = UHCI_PTR_TERM;
urbp->qh->is_stopped = 1;
urbp->qh->needs_fixup = 1;
urbp->qh->initial_toggle = uhci_toggle(td_token(td)) ^
(ret == -EREMOTEIO);
return ret; return ret;
} }
...@@ -995,76 +984,39 @@ static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, ...@@ -995,76 +984,39 @@ static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
/* /*
* Isochronous transfers * Isochronous transfers
*/ */
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end) static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
{ struct uhci_qh *qh)
struct urb *last_urb = NULL;
struct urb_priv *up;
int ret = 0;
list_for_each_entry(up, &uhci->urb_list, urb_list) {
struct urb *u = up->urb;
/* look for pending URBs with identical pipe handle */
if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
(u->status == -EINPROGRESS) && (u != urb)) {
if (!last_urb)
*start = u->start_frame;
last_urb = u;
}
}
if (last_urb) {
*end = (last_urb->start_frame + last_urb->number_of_packets *
last_urb->interval) & (UHCI_NUMFRAMES-1);
ret = 0;
} else
ret = -1; /* no previous urb found */
return ret;
}
static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
{ {
int limits; struct uhci_td *td = NULL; /* Since urb->number_of_packets > 0 */
unsigned int start = 0, end = 0; int i, frame;
unsigned long destination, status;
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
if (urb->number_of_packets > 900) /* 900? Why? */ if (urb->number_of_packets > 900) /* 900? Why? */
return -EFBIG; return -EFBIG;
limits = isochronous_find_limits(uhci, urb, &start, &end); status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
/* Figure out the starting frame number */
if (urb->transfer_flags & URB_ISO_ASAP) { if (urb->transfer_flags & URB_ISO_ASAP) {
if (limits) { if (list_empty(&qh->queue)) {
uhci_get_current_frame_number(uhci); uhci_get_current_frame_number(uhci);
urb->start_frame = (uhci->frame_number + 10) urb->start_frame = (uhci->frame_number + 10);
& (UHCI_NUMFRAMES - 1);
} else } else { /* Go right after the last one */
urb->start_frame = end; struct urb *last_urb;
last_urb = list_entry(qh->queue.prev,
struct urb_priv, node)->urb;
urb->start_frame = (last_urb->start_frame +
last_urb->number_of_packets *
last_urb->interval);
}
} else { } else {
urb->start_frame &= (UHCI_NUMFRAMES - 1);
/* FIXME: Sanity check */ /* FIXME: Sanity check */
} }
urb->start_frame &= (UHCI_NUMFRAMES - 1);
return 0;
}
/*
* Isochronous transfers
*/
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
struct uhci_qh *qh)
{
struct uhci_td *td = NULL; /* Since urb->number_of_packets > 0 */
int i, ret, frame;
unsigned long destination, status;
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
ret = isochronous_find_start(uhci, urb);
if (ret)
return ret;
for (i = 0; i < urb->number_of_packets; i++) { for (i = 0; i < urb->number_of_packets; i++) {
td = uhci_alloc_td(uhci); td = uhci_alloc_td(uhci);
...@@ -1203,7 +1155,6 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd, ...@@ -1203,7 +1155,6 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
/* Add this URB to the QH */ /* Add this URB to the QH */
urbp->qh = qh; urbp->qh = qh;
list_add_tail(&urbp->node, &qh->queue); list_add_tail(&urbp->node, &qh->queue);
list_add_tail(&urbp->urb_list, &uhci->urb_list);
/* If the new URB is the first and only one on this QH then either /* If the new URB is the first and only one on this QH then either
* the QH is new and idle or else it's unlinked and waiting to * the QH is new and idle or else it's unlinked and waiting to
...@@ -1224,49 +1175,66 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd, ...@@ -1224,49 +1175,66 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
return ret; return ret;
} }
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned long flags;
struct urb_priv *urbp;
spin_lock_irqsave(&uhci->lock, flags);
urbp = urb->hcpriv;
if (!urbp) /* URB was never linked! */
goto done;
/* Remove Isochronous TDs from the frame list ASAP */
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
uhci_unlink_isochronous_tds(uhci, urb);
uhci_unlink_qh(uhci, urbp->qh);
done:
spin_unlock_irqrestore(&uhci->lock, flags);
return 0;
}
/* /*
* Return the result of a transfer * Finish unlinking an URB and give it back
*/ */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb) static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{ {
int status;
int okay_to_giveback = 0;
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
switch (usb_pipetype(urb->pipe)) { /* Isochronous TDs get unlinked directly from the frame list */
case PIPE_CONTROL: if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
status = uhci_result_control(uhci, urb); uhci_unlink_isochronous_tds(uhci, urb);
break;
case PIPE_ISOCHRONOUS:
status = uhci_result_isochronous(uhci, urb);
break;
default: /* PIPE_BULK or PIPE_INTERRUPT */
status = uhci_result_common(uhci, urb);
break;
}
spin_lock(&urb->lock); /* If the URB isn't first on its queue, adjust the link pointer
if (urb->status == -EINPROGRESS) { /* Not yet dequeued */ * of the last TD in the previous URB. */
if (status != -EINPROGRESS) { /* URB has completed */ else if (qh->queue.next != &urbp->node) {
urb->status = status; struct urb_priv *purbp;
struct uhci_td *ptd, *ltd;
/* If the URB got a real error (as opposed to purbp = list_entry(urbp->node.prev, struct urb_priv, node);
* simply being dequeued), we don't have to ptd = list_entry(purbp->td_list.prev, struct uhci_td,
* unlink the QH. Fix this later... */ list);
if (status < 0) ltd = list_entry(urbp->td_list.prev, struct uhci_td,
uhci_unlink_qh(uhci, urbp->qh); list);
else ptd->link = ltd->link;
okay_to_giveback = 1;
}
} else { /* Already dequeued */
if (urbp->qh->state == QH_STATE_UNLINKING &&
uhci->frame_number + uhci->is_stopped !=
urbp->qh->unlink_frame)
okay_to_giveback = 1;
} }
spin_unlock(&urb->lock);
if (!okay_to_giveback) /* Take the URB off the QH's queue. If the queue is now empty,
return; * this is a perfect time for a toggle fixup. */
list_del_init(&urbp->node);
if (list_empty(&qh->queue) && qh->needs_fixup) {
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), qh->initial_toggle);
qh->needs_fixup = 0;
}
uhci_dec_fsbr(uhci, urb); /* Safe since it checks */
uhci_free_urb_priv(uhci, urbp);
switch (usb_pipetype(urb->pipe)) { switch (usb_pipetype(urb->pipe)) {
case PIPE_ISOCHRONOUS: case PIPE_ISOCHRONOUS:
...@@ -1277,122 +1245,107 @@ static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb) ...@@ -1277,122 +1245,107 @@ static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
case PIPE_INTERRUPT: case PIPE_INTERRUPT:
/* Release bandwidth for Interrupt or Isoc. transfers */ /* Release bandwidth for Interrupt or Isoc. transfers */
/* Make sure we don't release if we have a queued URB */ /* Make sure we don't release if we have a queued URB */
if (list_empty(&urbp->qh->queue) && urb->bandwidth) if (list_empty(&qh->queue) && urb->bandwidth)
usb_release_bandwidth(urb->dev, urb, 0); usb_release_bandwidth(urb->dev, urb, 0);
else else
/* bandwidth was passed on to queued URB, */ /* bandwidth was passed on to queued URB, */
/* so don't let usb_unlink_urb() release it */ /* so don't let usb_unlink_urb() release it */
urb->bandwidth = 0; urb->bandwidth = 0;
/* Falls through */
case PIPE_BULK:
if (status < 0)
uhci_fixup_toggles(urb);
break;
default: /* PIPE_CONTROL */
break; break;
} }
/* Take the URB's TDs off the hardware schedule */ spin_unlock(&uhci->lock);
uhci_remove_tds_from_schedule(uhci, urb, status); usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, regs);
spin_lock(&uhci->lock);
/* Take the URB off the QH's queue and see if the QH is now unused */
list_del_init(&urbp->node);
if (list_empty(&urbp->qh->queue))
uhci_unlink_qh(uhci, urbp->qh);
uhci_dec_fsbr(uhci, urb); /* Safe since it checks */ /* If the queue is now empty, we can unlink the QH and give up its
* reserved bandwidth. */
if (list_empty(&qh->queue)) {
uhci_unlink_qh(uhci, qh);
/* Queue it for giving back */ /* Bandwidth stuff not yet implemented */
list_move_tail(&urbp->urb_list, &uhci->complete_list); }
} }
/* /*
* Check out the QHs waiting to be fully unlinked * Scan the URBs in a QH's queue
*/ */
static void uhci_scan_unlinking_qhs(struct uhci_hcd *uhci) #define QH_FINISHED_UNLINKING(qh) \
{ (qh->state == QH_STATE_UNLINKING && \
struct uhci_qh *qh, *tmp; uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
list_for_each_entry_safe(qh, tmp, &uhci->skel_unlink_qh->node, node) { static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh,
struct pt_regs *regs)
/* If the queue is empty and the QH is fully unlinked then
* it can become IDLE. */
if (list_empty(&qh->queue)) {
if (uhci->frame_number + uhci->is_stopped !=
qh->unlink_frame)
uhci_make_qh_idle(uhci, qh);
/* If none of the QH's URBs have been dequeued then the QH
* should be re-activated. */
} else {
struct urb_priv *urbp;
int any_dequeued = 0;
list_for_each_entry(urbp, &qh->queue, node) {
if (urbp->urb->status != -EINPROGRESS) {
any_dequeued = 1;
break;
}
}
if (!any_dequeued)
uhci_activate_qh(uhci, qh);
}
}
}
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{ {
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned long flags;
struct urb_priv *urbp; struct urb_priv *urbp;
struct urb *urb;
int status;
spin_lock_irqsave(&uhci->lock, flags); while (!list_empty(&qh->queue)) {
urbp = urb->hcpriv; urbp = list_entry(qh->queue.next, struct urb_priv, node);
if (!urbp) /* URB was never linked! */ urb = urbp->urb;
goto done;
/* Remove Isochronous TDs from the frame list ASAP */ switch (usb_pipetype(urb->pipe)) {
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) case PIPE_CONTROL:
uhci_unlink_isochronous_tds(uhci, urb); status = uhci_result_control(uhci, urb);
uhci_unlink_qh(uhci, urbp->qh); break;
case PIPE_ISOCHRONOUS:
status = uhci_result_isochronous(uhci, urb);
break;
default: /* PIPE_BULK or PIPE_INTERRUPT */
status = uhci_result_common(uhci, urb);
break;
}
if (status == -EINPROGRESS)
break;
done: spin_lock(&urb->lock);
spin_unlock_irqrestore(&uhci->lock, flags); if (urb->status == -EINPROGRESS) /* Not dequeued */
return 0; urb->status = status;
} else
status = -ECONNRESET;
spin_unlock(&urb->lock);
static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb) /* Dequeued but completed URBs can't be given back unless
{ * the QH is stopped or has finished unlinking. */
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; if (status == -ECONNRESET &&
struct list_head *head; !(qh->is_stopped || QH_FINISHED_UNLINKING(qh)))
struct uhci_td *td; return;
int count = 0;
uhci_dec_fsbr(uhci, urb); uhci_giveback_urb(uhci, qh, urb, regs);
if (qh->is_stopped)
break;
}
urbp->fsbr_timeout = 1; /* If the QH is neither stopped nor finished unlinking (normal case),
* our work here is done. */
restart:
if (!(qh->is_stopped || QH_FINISHED_UNLINKING(qh)))
return;
/* /* Otherwise give back each of the dequeued URBs */
* Ideally we would want to fix qh->element as well, but it's list_for_each_entry(urbp, &qh->queue, node) {
* read/write by the HC, so that can introduce a race. It's not urb = urbp->urb;
* really worth the hassle if (urb->status != -EINPROGRESS) {
*/ uhci_save_toggle(qh, urb);
uhci_giveback_urb(uhci, qh, urb, regs);
goto restart;
}
}
qh->is_stopped = 0;
head = &urbp->td_list; /* There are no more dequeued URBs. If there are still URBs on the
list_for_each_entry(td, head, list) { * queue, the QH can now be re-activated. */
/* if (!list_empty(&qh->queue)) {
* Make sure we don't do the last one (since it'll have the if (qh->needs_fixup)
* TERM bit set) as well as we skip every so many TDs to uhci_fixup_toggles(qh, 0);
* make sure it doesn't hog the bandwidth uhci_activate_qh(uhci, qh);
*/
if (td->list.next != head && (count % DEPTH_INTERVAL) ==
(DEPTH_INTERVAL - 1))
td->link |= UHCI_PTR_DEPTH;
count++;
} }
return 0; /* The queue is empty. The QH can become idle if it is fully
* unlinked. */
else if (QH_FINISHED_UNLINKING(qh))
uhci_make_qh_idle(uhci, qh);
} }
static void uhci_free_pending_tds(struct uhci_hcd *uhci) static void uhci_free_pending_tds(struct uhci_hcd *uhci)
...@@ -1406,36 +1359,13 @@ static void uhci_free_pending_tds(struct uhci_hcd *uhci) ...@@ -1406,36 +1359,13 @@ static void uhci_free_pending_tds(struct uhci_hcd *uhci)
} }
} }
static void /*
uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs) * Process events in the schedule, but only in one thread at a time
__releases(uhci->lock) */
__acquires(uhci->lock)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
uhci_free_urb_priv(uhci, (struct urb_priv *) (urb->hcpriv));
spin_unlock(&uhci->lock);
usb_hcd_giveback_urb(hcd, urb, regs);
spin_lock(&uhci->lock);
}
static void uhci_finish_completion(struct uhci_hcd *uhci, struct pt_regs *regs)
{
struct urb_priv *urbp, *tmp;
list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
struct urb *urb = urbp->urb;
list_del_init(&urbp->urb_list);
uhci_finish_urb(uhci_to_hcd(uhci), urb, regs);
}
}
/* Process events in the schedule, but only in one thread at a time */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs) static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{ {
struct urb_priv *urbp, *tmp; int i;
struct uhci_qh *qh;
/* Don't allow re-entrant calls */ /* Don't allow re-entrant calls */
if (uhci->scan_in_progress) { if (uhci->scan_in_progress) {
...@@ -1452,26 +1382,24 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs) ...@@ -1452,26 +1382,24 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
uhci_free_pending_tds(uhci); uhci_free_pending_tds(uhci);
/* Walk the list of pending URBs to see which ones completed /* Go through all the QH queues and process the URBs in each one */
* (must be _safe because uhci_transfer_result() dequeues URBs) */ for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) { uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
struct urb *urb = urbp->urb; struct uhci_qh, node);
while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
/* Checks the status and does all of the magic necessary */ uhci->next_qh = list_entry(qh->node.next,
uhci_transfer_result(uhci, urb); struct uhci_qh, node);
uhci_scan_qh(uhci, qh, regs);
}
} }
uhci_finish_completion(uhci, regs);
/* If the controller is stopped, we can finish these off right now */
if (uhci->is_stopped)
uhci_free_pending_tds(uhci);
if (uhci->need_rescan) if (uhci->need_rescan)
goto rescan; goto rescan;
uhci->scan_in_progress = 0; uhci->scan_in_progress = 0;
/* Check out the QHs waiting for unlinking */ /* If the controller is stopped, we can finish these off right now */
uhci_scan_unlinking_qhs(uhci); if (uhci->is_stopped)
uhci_free_pending_tds(uhci);
if (list_empty(&uhci->td_remove_list) && if (list_empty(&uhci->td_remove_list) &&
list_empty(&uhci->skel_unlink_qh->node)) list_empty(&uhci->skel_unlink_qh->node))
...@@ -1482,19 +1410,8 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs) ...@@ -1482,19 +1410,8 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
static void check_fsbr(struct uhci_hcd *uhci) static void check_fsbr(struct uhci_hcd *uhci)
{ {
struct urb_priv *up; /* For now, don't scan URBs for FSBR timeouts.
* Add it back in later... */
list_for_each_entry(up, &uhci->urb_list, urb_list) {
struct urb *u = up->urb;
spin_lock(&u->lock);
/* Check if the FSBR timed out */
if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
uhci_fsbr_timeout(uhci, u);
spin_unlock(&u->lock);
}
/* Really disable FSBR */ /* Really disable FSBR */
if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) { if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment