Commit 3ca2a321 authored by Alan Stern, committed by Greg Kroah-Hartman

UHCI: fix bandwidth allocation

This patch (as840) fixes the bandwidth allocation mechanism in
uhci-hcd.  It has never worked correctly.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 6a6c957e
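The allocation scheme introduced below keeps a 32-slot (MAX_PHASE) table of microseconds already committed in each frame slot, picks the phase whose busiest slot is least loaded, and refuses the endpoint if that worst-case slot plus the endpoint's own load would exceed the 90% periodic budget (900 us per frame). The following is a minimal, self-contained sketch of that minimax idea; the names (highest_load, check_bandwidth) and the table contents are illustrative only, not the driver's code or API:

/* Sketch of the minimax phase selection: choose the phase whose
 * worst-case slot load is smallest, then verify the 900 us budget. */
#include <stdio.h>

#define MAX_PHASE 32
static int load[MAX_PHASE];             /* committed us per frame slot */

static int highest_load(int phase, int period)
{
        int hi = load[phase];

        for (phase += period; phase < MAX_PHASE; phase += period)
                if (load[phase] > hi)
                        hi = load[phase];
        return hi;
}

/* Returns the chosen phase, or -1 if the endpoint would not fit. */
static int check_bandwidth(int period, int ep_load)
{
        int phase, best_phase = 0;
        int max_phase = period < MAX_PHASE ? period : MAX_PHASE;
        int minimax = highest_load(0, period);

        for (phase = 1; phase < max_phase; ++phase) {
                int l = highest_load(phase, period);

                if (l < minimax) {
                        minimax = l;
                        best_phase = phase;
                }
        }
        return (minimax + ep_load > 900) ? -1 : best_phase;
}

int main(void)
{
        load[0] = 600;                  /* pretend slot 0 is mostly full */
        printf("period 8: phase %d\n", check_bandwidth(8, 300));
        printf("period 1: phase %d\n", check_bandwidth(1, 400));
        return 0;
}

With slot 0 already carrying 600 us, a period-8 endpoint is steered to an empty phase (prints phase 1), while a period-1 endpoint needing 400 us in every frame is rejected (prints -1), which is exactly the behavior the patch's uhci_check_bandwidth() implements with -ENOSPC.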
drivers/usb/host/uhci-debug.c

@@ -168,9 +168,13 @@ static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
 			space, "", qh, qtype,
 			le32_to_cpu(qh->link), le32_to_cpu(element));
 	if (qh->type == USB_ENDPOINT_XFER_ISOC)
-		out += sprintf(out, "%*s period %d frame %x desc [%p]\n",
-				space, "", qh->period, qh->iso_frame,
-				qh->iso_packet_desc);
+		out += sprintf(out, "%*s period %d phase %d load %d us, "
+				"frame %x desc [%p]\n",
+				space, "", qh->period, qh->phase, qh->load,
+				qh->iso_frame, qh->iso_packet_desc);
+	else if (qh->type == USB_ENDPOINT_XFER_INT)
+		out += sprintf(out, "%*s period %d phase %d load %d us\n",
+				space, "", qh->period, qh->phase, qh->load);
 
 	if (element & UHCI_PTR_QH)
 		out += sprintf(out, "%*s Element points to QH (bug?)\n", space, "");
@@ -352,6 +356,17 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
 	out += uhci_show_root_hub_state(uhci, out, len - (out - buf));
 	out += sprintf(out, "HC status\n");
 	out += uhci_show_status(uhci, out, len - (out - buf));
+	out += sprintf(out, "Periodic load table\n");
+	for (i = 0; i < MAX_PHASE; ++i) {
+		out += sprintf(out, "\t%d", uhci->load[i]);
+		if (i % 8 == 7)
+			*out++ = '\n';
+	}
+	out += sprintf(out, "Total: %d, #INT: %d, #ISO: %d\n",
+			uhci->total_load,
+			uhci_to_hcd(uhci)->self.bandwidth_int_reqs,
+			uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs);
+
 	if (debug <= 1)
 		return out - buf;
drivers/usb/host/uhci-hcd.h
@@ -83,6 +83,7 @@
 #define UHCI_MAX_SOF_NUMBER	2047	/* in an SOF packet */
 #define CAN_SCHEDULE_FRAMES	1000	/* how far in the future frames
 					 * can be scheduled */
+#define MAX_PHASE		32	/* Periodic scheduling length */
 
 /* When no queues need Full-Speed Bandwidth Reclamation,
  * delay this long before turning FSBR off */
@@ -141,6 +142,8 @@ struct uhci_qh {
 	unsigned long advance_jiffies;	/* Time of last queue advance */
 	unsigned int unlink_frame;	/* When the QH was unlinked */
 	unsigned int period;		/* For Interrupt and Isochronous QHs */
+	short phase;			/* Between 0 and period-1 */
+	short load;			/* Periodic time requirement, in us */
 	unsigned int iso_frame;		/* Frame # for iso_packet_desc */
 	int iso_status;			/* Status for Isochronous URBs */
@@ -153,6 +156,8 @@ struct uhci_qh {
 	unsigned int needs_fixup:1;	/* Must fix the TD toggle values */
 	unsigned int is_stopped:1;	/* Queue was stopped by error/unlink */
 	unsigned int wait_expired:1;	/* QH_WAIT_TIMEOUT has expired */
+	unsigned int bandwidth_reserved:1;	/* Periodic bandwidth has
+						 * been allocated */
 } __attribute__((aligned(16)));
 
 /*
@@ -414,6 +419,9 @@ struct uhci_hcd {
 	wait_queue_head_t waitqh;	/* endpoint_disable waiters */
 	int num_waiting;		/* Number of waiters */
 
+	int total_load;			/* Sum of array values */
+	short load[MAX_PHASE];		/* Periodic allocations */
+
 };
 
 /* Convert between a usb_hcd pointer and the corresponding uhci_hcd */
drivers/usb/host/uhci-q.c
@@ -261,6 +261,14 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
 		qh->udev = udev;
 		hep->hcpriv = qh;
+
+		if (qh->type == USB_ENDPOINT_XFER_INT ||
+				qh->type == USB_ENDPOINT_XFER_ISOC)
+			qh->load = usb_calc_bus_time(udev->speed,
+					usb_endpoint_dir_in(&hep->desc),
+					qh->type == USB_ENDPOINT_XFER_ISOC,
+					le16_to_cpu(hep->desc.wMaxPacketSize))
+						/ 1000 + 1;
 
 	} else {		/* Skeleton QH */
 		qh->state = QH_STATE_ACTIVE;
 		qh->type = -1;
@@ -496,6 +504,121 @@ static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
 	wake_up_all(&uhci->waitqh);
 }
 
+/*
+ * Find the highest existing bandwidth load for a given phase and period.
+ */
+static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
+{
+	int highest_load = uhci->load[phase];
+
+	for (phase += period; phase < MAX_PHASE; phase += period)
+		highest_load = max_t(int, highest_load, uhci->load[phase]);
+	return highest_load;
+}
+
+/*
+ * Set qh->phase to the optimal phase for a periodic transfer and
+ * check whether the bandwidth requirement is acceptable.
+ */
+static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	int minimax_load;
+
+	/* Find the optimal phase (unless it is already set) and get
+	 * its load value. */
+	if (qh->phase >= 0)
+		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
+	else {
+		int phase, load;
+		int max_phase = min_t(int, MAX_PHASE, qh->period);
+
+		qh->phase = 0;
+		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
+		for (phase = 1; phase < max_phase; ++phase) {
+			load = uhci_highest_load(uhci, phase, qh->period);
+			if (load < minimax_load) {
+				minimax_load = load;
+				qh->phase = phase;
+			}
+		}
+	}
+
+	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
+	if (minimax_load + qh->load > 900) {
+		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
+				"period %d, phase %d, %d + %d us\n",
+				qh->period, qh->phase, minimax_load, qh->load);
+		return -ENOSPC;
+	}
+	return 0;
+}
+
+/*
+ * Reserve a periodic QH's bandwidth in the schedule
+ */
+static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	int i;
+	int load = qh->load;
+	char *p = "??";
+
+	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
+		uhci->load[i] += load;
+		uhci->total_load += load;
+	}
+	uhci_to_hcd(uhci)->self.bandwidth_allocated =
+			uhci->total_load / MAX_PHASE;
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_INT:
+		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
+		p = "INT";
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
+		p = "ISO";
+		break;
+	}
+	qh->bandwidth_reserved = 1;
+	dev_dbg(uhci_dev(uhci),
+			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
+			"reserve", qh->udev->devnum,
+			qh->hep->desc.bEndpointAddress, p,
+			qh->period, qh->phase, load);
+}
+
+/*
+ * Release a periodic QH's bandwidth reservation
+ */
+static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	int i;
+	int load = qh->load;
+	char *p = "??";
+
+	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
+		uhci->load[i] -= load;
+		uhci->total_load -= load;
+	}
+	uhci_to_hcd(uhci)->self.bandwidth_allocated =
+			uhci->total_load / MAX_PHASE;
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_INT:
+		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
+		p = "INT";
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
+		p = "ISO";
+		break;
+	}
+	qh->bandwidth_reserved = 0;
+	dev_dbg(uhci_dev(uhci),
+			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
+			"release", qh->udev->devnum,
+			qh->hep->desc.bEndpointAddress, p,
+			qh->period, qh->phase, load);
+}
+
 static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
 		struct urb *urb)
 {
@@ -799,7 +922,6 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
 	wmb();
 	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
 	qh->dummy_td = td;
-	qh->period = urb->interval;
 
 	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
 			usb_pipeout(urb->pipe), toggle);
@@ -830,28 +952,42 @@ static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
 static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
 		struct uhci_qh *qh)
 {
-	int exponent;
+	int ret;
 
 	/* USB 1.1 interrupt transfers only involve one packet per interval.
 	 * Drivers can submit URBs of any length, but longer ones will need
 	 * multiple intervals to complete.
 	 */
 
-	/* Figure out which power-of-two queue to use */
-	for (exponent = 7; exponent >= 0; --exponent) {
-		if ((1 << exponent) <= urb->interval)
-			break;
-	}
-	if (exponent < 0)
-		return -EINVAL;
-	urb->interval = 1 << exponent;
-
-	if (qh->period == 0)
+	if (!qh->bandwidth_reserved) {
+		int exponent;
+
+		/* Figure out which power-of-two queue to use */
+		for (exponent = 7; exponent >= 0; --exponent) {
+			if ((1 << exponent) <= urb->interval)
+				break;
+		}
+		if (exponent < 0)
+			return -EINVAL;
+		qh->period = 1 << exponent;
 		qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)];
-	else if (qh->period != urb->interval)
-		return -EINVAL;		/* Can't change the period */
 
-	return uhci_submit_common(uhci, urb, qh);
+		/* For now, interrupt phase is fixed by the layout
+		 * of the QH lists. */
+		qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
+		ret = uhci_check_bandwidth(uhci, qh);
+		if (ret)
+			return ret;
+	} else if (qh->period > urb->interval)
+		return -EINVAL;		/* Can't decrease the period */
+
+	ret = uhci_submit_common(uhci, urb, qh);
+	if (ret == 0) {
+		urb->interval = qh->period;
+		if (!qh->bandwidth_reserved)
+			uhci_reserve_bandwidth(uhci, qh);
+	}
+	return ret;
 }
 
 /*
@@ -998,15 +1134,32 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 		return -EFBIG;
 
 	/* Check the period and figure out the starting frame number */
-	if (qh->period == 0) {
+	if (!qh->bandwidth_reserved) {
+		qh->period = urb->interval;
 		if (urb->transfer_flags & URB_ISO_ASAP) {
+			qh->phase = -1;		/* Find the best phase */
+			i = uhci_check_bandwidth(uhci, qh);
+			if (i)
+				return i;
+
+			/* Allow a little time to allocate the TDs */
 			uhci_get_current_frame_number(uhci);
-			urb->start_frame = uhci->frame_number + 10;
+			frame = uhci->frame_number + 10;
+
+			/* Move forward to the first frame having the
+			 * correct phase */
+			urb->start_frame = frame + ((qh->phase - frame) &
+					(qh->period - 1));
 		} else {
 			i = urb->start_frame - uhci->last_iso_frame;
 			if (i <= 0 || i >= UHCI_NUMFRAMES)
 				return -EINVAL;
+			qh->phase = urb->start_frame & (qh->period - 1);
+			i = uhci_check_bandwidth(uhci, qh);
+			if (i)
+				return i;
 		}
 	} else if (qh->period != urb->interval) {
 		return -EINVAL;		/* Can't change the period */
@@ -1052,9 +1205,6 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 	/* Set the interrupt-on-completion flag on the last packet. */
 	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);
 
-	qh->skel = uhci->skel_iso_qh;
-	qh->period = urb->interval;
-
 	/* Add the TDs to the frame list */
 	frame = urb->start_frame;
 	list_for_each_entry(td, &urbp->td_list, list) {
@@ -1068,6 +1218,9 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 		qh->iso_status = 0;
 	}
 
+	qh->skel = uhci->skel_iso_qh;
+	if (!qh->bandwidth_reserved)
+		uhci_reserve_bandwidth(uhci, qh);
 	return 0;
 }
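The URB_ISO_ASAP branch above aligns the first transfer with the QH's phase using modular arithmetic: since the period is always a power of two, frame + ((phase - frame) & (period - 1)) is the first frame at or after "frame" whose number is congruent to "phase" modulo "period". A small self-contained check of that arithmetic; the period, phase, and frame values are invented for illustration:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int period = 8, phase = 3;     /* hypothetical QH */
        unsigned int frame = 1029;              /* "current frame + 10" */
        unsigned int start = frame + ((phase - frame) & (period - 1));

        /* start is within one period of frame and has the right phase */
        assert(start >= frame && start < frame + period);
        assert((start & (period - 1)) == phase);
        printf("first frame with phase %u at or after %u: %u\n",
                        phase, frame, start);   /* prints 1035 */
        return 0;
}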
@@ -1122,7 +1275,6 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
 	unsigned long flags;
 	struct urb_priv *urbp;
 	struct uhci_qh *qh;
-	int bustime;
 
 	spin_lock_irqsave(&uhci->lock, flags);
@@ -1152,35 +1304,11 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
 		ret = uhci_submit_bulk(uhci, urb, qh);
 		break;
 	case USB_ENDPOINT_XFER_INT:
-		if (list_empty(&qh->queue)) {
-			bustime = usb_check_bandwidth(urb->dev, urb);
-			if (bustime < 0)
-				ret = bustime;
-			else {
-				ret = uhci_submit_interrupt(uhci, urb, qh);
-				if (ret == 0)
-					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
-			}
-		} else {	/* inherit from parent */
-			struct urb_priv *eurbp;
-
-			eurbp = list_entry(qh->queue.prev, struct urb_priv,
-					node);
-			urb->bandwidth = eurbp->urb->bandwidth;
-			ret = uhci_submit_interrupt(uhci, urb, qh);
-		}
+		ret = uhci_submit_interrupt(uhci, urb, qh);
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
 		urb->error_count = 0;
-		bustime = usb_check_bandwidth(urb->dev, urb);
-		if (bustime < 0) {
-			ret = bustime;
-			break;
-		}
-
 		ret = uhci_submit_isochronous(uhci, urb, qh);
-		if (ret == 0)
-			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
 		break;
 	}
 
 	if (ret != 0)
@@ -1277,24 +1405,6 @@ __acquires(uhci->lock)
 	uhci_free_urb_priv(uhci, urbp);
 
-	switch (qh->type) {
-	case USB_ENDPOINT_XFER_ISOC:
-		/* Release bandwidth for Interrupt or Isoc. transfers */
-		if (urb->bandwidth)
-			usb_release_bandwidth(urb->dev, urb, 1);
-		break;
-	case USB_ENDPOINT_XFER_INT:
-		/* Release bandwidth for Interrupt or Isoc. transfers */
-		/* Make sure we don't release if we have a queued URB */
-		if (list_empty(&qh->queue) && urb->bandwidth)
-			usb_release_bandwidth(urb->dev, urb, 0);
-		else
-			/* bandwidth was passed on to queued URB, */
-			/* so don't let usb_unlink_urb() release it */
-			urb->bandwidth = 0;
-		break;
-	}
-
 	spin_unlock(&uhci->lock);
 	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb);
 	spin_lock(&uhci->lock);
@@ -1303,9 +1413,8 @@
 	 * reserved bandwidth. */
 	if (list_empty(&qh->queue)) {
 		uhci_unlink_qh(uhci, qh);
-
-		/* Bandwidth stuff not yet implemented */
-		qh->period = 0;
+		if (qh->bandwidth_reserved)
+			uhci_release_bandwidth(uhci, qh);
 	}
 }