Commit ffa0248e authored by Alan Stern, committed by Greg Kroah-Hartman

USB: EHCI: create a "periodic schedule info" struct

This patch begins the process of unifying the scheduling parameters
that ehci-hcd uses for interrupt and isochronous transfers.  It
creates an ehci_per_sched structure, which will be stored in both
ehci_qh and ehci_iso_stream structures, and will contain the common
scheduling information needed for both.

Initially we merely create the new structure and move some existing
fields into it.  Later patches will add more fields and utilize these
structures in improved scheduling algorithms.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 91a99b5e
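
For orientation, this is the structure the patch introduces (copied from the header hunk near the end of this diff), followed by a purely illustrative helper. ehci_ps_bandwidth() is not part of the patch; it is a hypothetical sketch showing how the unified fields express the interrupt-QH bandwidth bookkeeping performed in qh_link_periodic() and qh_unlink_periodic().

/*
 * Scheduling and budgeting information for periodic transfers, for both
 * high-speed devices and full/low-speed devices lying behind a TT.
 * (u8/u16 and struct usb_device come from the usual kernel headers.)
 */
struct ehci_per_sched {
	struct usb_device	*udev;		/* access to the TT */
	struct usb_host_endpoint *ep;
	u16			tt_usecs;	/* time on the FS/LS bus */
	u16			period;		/* actual period in frames */
	u16			phase;		/* actual phase, frame part */
	u8			phase_uf;	/* uframe part of the phase */
	u8			usecs, c_usecs;	/* times on the HS bus */
};

/*
 * Hypothetical sketch (not in this patch): the average usecs-per-frame
 * value that qh_link_periodic() adds to, and qh_unlink_periodic()
 * subtracts from, self.bandwidth_allocated.  A period of 0 means the
 * endpoint is polled in every microframe.
 */
static unsigned ehci_ps_bandwidth(const struct ehci_per_sched *ps)
{
	return ps->period ? (ps->usecs + ps->c_usecs) / ps->period
			  : ps->usecs * 8;
}
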
@@ -571,7 +571,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
case Q_TYPE_QH:
hw = p.qh->hw;
temp = scnprintf (next, size, " qh%d-%04x/%p",
p.qh->period,
p.qh->ps.period,
hc32_to_cpup(ehci,
&hw->hw_info2)
/* uframe masks */
@@ -618,7 +618,8 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
speed_char (scratch),
scratch & 0x007f,
(scratch >> 8) & 0x000f, type,
p.qh->usecs, p.qh->c_usecs,
p.qh->ps.usecs,
p.qh->ps.c_usecs,
temp,
0x7ff & (scratch >> 16));
@@ -645,7 +646,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
case Q_TYPE_SITD:
temp = scnprintf (next, size,
" sitd%d-%04x/%p",
p.sitd->stream->interval,
p.sitd->stream->ps.period,
hc32_to_cpup(ehci, &p.sitd->hw_uframe)
& 0x0000ffff,
p.sitd);
......
@@ -1029,7 +1029,7 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
* while the QH is active. Unlink it now;
* re-linking will call qh_refresh().
*/
usb_settoggle(qh->dev, epnum, is_out, 0);
usb_settoggle(qh->ps.udev, epnum, is_out, 0);
qh->exception = 1;
if (eptype == USB_ENDPOINT_XFER_BULK)
start_unlink_async(ehci, qh);
......
@@ -105,9 +105,9 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
is_out = qh->is_out;
epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
usb_settoggle (qh->dev, epnum, is_out, 1);
usb_settoggle(qh->ps.udev, epnum, is_out, 1);
}
}
@@ -797,26 +797,25 @@ qh_make (
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
is_input, 0,
hb_mult(maxp) * max_packet(maxp)));
qh->start = NO_FRAME;
qh->ps.phase = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
qh->c_usecs = 0;
qh->ps.c_usecs = 0;
qh->gap_uf = 0;
qh->period = urb->interval >> 3;
if (qh->period == 0 && urb->interval != 1) {
if (urb->interval > 1 && urb->interval < 8) {
/* NOTE interval 2 or 4 uframes could work.
* But interval 1 scheduling is simpler, and
* includes high bandwidth.
*/
urb->interval = 1;
} else if (qh->period > ehci->periodic_size) {
qh->period = ehci->periodic_size;
urb->interval = qh->period << 3;
} else if (urb->interval > ehci->periodic_size << 3) {
urb->interval = ehci->periodic_size << 3;
}
qh->ps.period = urb->interval >> 3;
} else {
int think_time;
@@ -826,27 +825,26 @@ qh_make (
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
qh->c_usecs = qh->usecs + HS_USECS (0);
qh->usecs = HS_USECS (1);
qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0);
qh->ps.usecs = HS_USECS(1);
} else { // SPLIT+DATA, gap, CSPLIT
qh->usecs += HS_USECS (1);
qh->c_usecs = HS_USECS (0);
qh->ps.usecs += HS_USECS(1);
qh->ps.c_usecs = HS_USECS(0);
}
think_time = tt ? tt->think_time : 0;
qh->tt_usecs = NS_TO_US (think_time +
qh->ps.tt_usecs = NS_TO_US(think_time +
usb_calc_bus_time (urb->dev->speed,
is_input, 0, max_packet (maxp)));
qh->period = urb->interval;
if (qh->period > ehci->periodic_size) {
qh->period = ehci->periodic_size;
urb->interval = qh->period;
}
if (urb->interval > ehci->periodic_size)
urb->interval = ehci->periodic_size;
qh->ps.period = urb->interval;
}
}
/* support for tt scheduling, and access to toggles */
qh->dev = urb->dev;
qh->ps.udev = urb->dev;
qh->ps.ep = urb->ep;
/* using TT? */
switch (urb->dev->speed) {
......
@@ -121,11 +121,11 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
hw = q->qh->hw;
/* is it in the S-mask? */
if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
usecs += q->qh->usecs;
usecs += q->qh->ps.usecs;
/* ... or C-mask? */
if (hw->hw_info2 & cpu_to_hc32(ehci,
1 << (8 + uframe)))
usecs += q->qh->c_usecs;
usecs += q->qh->ps.c_usecs;
hw_p = &hw->hw_next;
q = &q->qh->qh_next;
break;
@@ -142,7 +142,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
break;
case Q_TYPE_ITD:
if (q->itd->hw_transaction[uframe])
usecs += q->itd->stream->usecs;
usecs += q->itd->stream->ps.usecs;
hw_p = &q->itd->hw_next;
q = &q->itd->itd_next;
break;
@@ -152,7 +152,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
1 << uframe)) {
if (q->sitd->hw_fullspeed_ep &
cpu_to_hc32(ehci, 1<<31))
usecs += q->sitd->stream->usecs;
usecs += q->sitd->stream->ps.usecs;
else /* worst case for OUT start-split */
usecs += HS_USECS_ISO (188);
}
@@ -161,7 +161,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
if (q->sitd->hw_uframe &
cpu_to_hc32(ehci, 1 << (8 + uframe))) {
/* worst case for IN complete-split */
usecs += q->sitd->stream->c_usecs;
usecs += q->sitd->stream->ps.c_usecs;
}
hw_p = &q->sitd->hw_next;
@@ -258,9 +258,9 @@ periodic_tt_usecs (
q = &q->itd->itd_next;
continue;
case Q_TYPE_QH:
if (same_tt(dev, q->qh->dev)) {
if (same_tt(dev, q->qh->ps.udev)) {
uf = tt_start_uframe(ehci, q->qh->hw->hw_info2);
tt_usecs[uf] += q->qh->tt_usecs;
tt_usecs[uf] += q->qh->ps.tt_usecs;
}
hw_p = &q->qh->hw->hw_next;
q = &q->qh->qh_next;
@@ -268,7 +268,7 @@ periodic_tt_usecs (
case Q_TYPE_SITD:
if (same_tt(dev, q->sitd->urb->dev)) {
uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
tt_usecs[uf] += q->sitd->stream->tt_usecs;
tt_usecs[uf] += q->sitd->stream->ps.tt_usecs;
}
hw_p = &q->sitd->hw_next;
q = &q->sitd->sitd_next;
@@ -391,7 +391,7 @@ static int tt_no_collision (
continue;
case Q_TYPE_QH:
hw = here.qh->hw;
if (same_tt (dev, here.qh->dev)) {
if (same_tt(dev, here.qh->ps.udev)) {
u32 mask;
mask = hc32_to_cpu(ehci,
@@ -471,19 +471,19 @@ static void disable_periodic(struct ehci_hcd *ehci)
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
unsigned period = qh->period;
unsigned period = qh->ps.period;
dev_dbg (&qh->dev->dev,
dev_dbg(&qh->ps.udev->dev,
"link qh%d-%04x/%p start %d [%d/%d us]\n",
period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
& (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* high bandwidth, or otherwise every microframe */
if (period == 0)
period = 1;
for (i = qh->start; i < ehci->periodic_size; i += period) {
for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
union ehci_shadow *prev = &ehci->pshadow[i];
__hc32 *hw_p = &ehci->periodic[i];
union ehci_shadow here = *prev;
@@ -503,7 +503,7 @@ static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
* enables sharing interior tree nodes
*/
while (here.ptr && qh != here.qh) {
if (qh->period > here.qh->period)
if (qh->ps.period > here.qh->ps.period)
break;
prev = &here.qh->qh_next;
hw_p = &here.qh->hw->hw_next;
@@ -523,10 +523,10 @@ static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
qh->xacterrs = 0;
qh->exception = 0;
/* update per-qh bandwidth for usbfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
? ((qh->usecs + qh->c_usecs) / qh->period)
: (qh->usecs * 8);
/* update per-qh bandwidth for debugfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.period
? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.period)
: (qh->ps.usecs * 8);
list_add(&qh->intr_node, &ehci->intr_qh_list);
@@ -556,22 +556,21 @@ static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
*/
/* high bandwidth, or otherwise part of every microframe */
if ((period = qh->period) == 0)
period = 1;
period = qh->ps.period ? : 1;
for (i = qh->start; i < ehci->periodic_size; i += period)
for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
periodic_unlink (ehci, i, qh);
/* update per-qh bandwidth for usbfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
? ((qh->usecs + qh->c_usecs) / qh->period)
: (qh->usecs * 8);
/* update per-qh bandwidth for debugfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.period
? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.period)
: (qh->ps.usecs * 8);
dev_dbg (&qh->dev->dev,
dev_dbg(&qh->ps.udev->dev,
"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
qh->period,
qh->ps.period,
hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
@@ -744,26 +743,26 @@ static int check_intr_schedule (
int retval = -ENOSPC;
u8 mask = 0;
if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */
goto done;
if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
if (!check_period(ehci, frame, uframe, qh->ps.period, qh->ps.usecs))
goto done;
if (!qh->c_usecs) {
if (!qh->ps.c_usecs) {
retval = 0;
*c_maskp = 0;
goto done;
}
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
qh->tt_usecs)) {
if (tt_available(ehci, qh->ps.period, qh->ps.udev, frame, uframe,
qh->ps.tt_usecs)) {
unsigned i;
/* TODO : this may need FSTN for SSPLIT in uframe 5. */
for (i = uframe+2; i < 8 && i <= uframe+4; i++)
if (!check_period (ehci, frame, i,
qh->period, qh->c_usecs))
if (!check_period(ehci, frame, i,
qh->ps.period, qh->ps.c_usecs))
goto done;
else
mask |= 1 << i;
@@ -784,12 +783,12 @@ static int check_intr_schedule (
*c_maskp = cpu_to_hc32(ehci, mask << 8);
mask |= 1 << uframe;
if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
qh->period, qh->c_usecs))
if (tt_no_collision(ehci, qh->ps.period, qh->ps.udev, frame, mask)) {
if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
qh->ps.period, qh->ps.c_usecs))
goto done;
if (!check_period (ehci, frame, uframe + qh->gap_uf,
qh->period, qh->c_usecs))
if (!check_period(ehci, frame, uframe + qh->gap_uf,
qh->ps.period, qh->ps.c_usecs))
goto done;
retval = 0;
}
@@ -810,7 +809,7 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
struct ehci_qh_hw *hw = qh->hw;
hw->hw_next = EHCI_LIST_END(ehci);
frame = qh->start;
frame = qh->ps.phase;
/* reuse the previous schedule slots, if we can */
if (frame != NO_FRAME) {
@@ -828,11 +827,11 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
*/
if (status) {
/* "normal" case, uframing flexible except with splits */
if (qh->period) {
if (qh->ps.period) {
int i;
for (i = qh->period; status && i > 0; --i) {
frame = ++ehci->random_frame % qh->period;
for (i = qh->ps.period; status && i > 0; --i) {
frame = ++ehci->random_frame % qh->ps.period;
for (uframe = 0; uframe < 8; uframe++) {
status = check_intr_schedule (ehci,
frame, uframe, qh,
@@ -842,18 +841,18 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
}
}
/* qh->period == 0 means every uframe */
/* qh->ps.period == 0 means every uframe */
} else {
frame = 0;
status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
}
if (status)
goto done;
qh->start = frame;
qh->ps.phase = frame;
/* reset S-frame and (maybe) C-frame masks */
hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
hw->hw_info2 |= qh->period
hw->hw_info2 |= qh->ps.period
? cpu_to_hc32(ehci, 1 << uframe)
: cpu_to_hc32(ehci, QH_SMASK);
hw->hw_info2 |= c_mask;
@@ -978,25 +977,24 @@ static void
iso_stream_init (
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
struct usb_device *dev,
int pipe,
unsigned interval
struct urb *urb
)
{
static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
struct usb_device *dev = urb->dev;
unsigned interval = urb->interval;
u32 buf1;
unsigned epnum, maxp;
int is_input;
long bandwidth;
/*
* this might be a "high bandwidth" highspeed endpoint,
* as encoded in the ep descriptor's wMaxPacket field
*/
epnum = usb_pipeendpoint (pipe);
is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
maxp = usb_maxpacket(dev, pipe, !is_input);
epnum = usb_pipeendpoint(urb->pipe);
is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
maxp = usb_endpoint_maxp(&urb->ep->desc);
if (is_input) {
buf1 = (1 << 11);
} else {
@@ -1020,9 +1018,11 @@ iso_stream_init (
/* usbfs wants to report the average usecs per frame tied up
* when transfers on this endpoint are scheduled ...
*/
stream->usecs = HS_USECS_ISO (maxp);
bandwidth = stream->usecs * 8;
bandwidth /= interval;
stream->ps.usecs = HS_USECS_ISO(maxp);
stream->bandwidth = stream->ps.usecs * 8 / interval;
stream->uperiod = interval;
stream->ps.period = interval >> 3;
} else {
u32 addr;
@@ -1036,17 +1036,17 @@ iso_stream_init (
addr |= dev->tt->hub->devnum << 16;
addr |= epnum << 8;
addr |= dev->devnum;
stream->usecs = HS_USECS_ISO (maxp);
stream->ps.usecs = HS_USECS_ISO(maxp);
think_time = dev->tt ? dev->tt->think_time : 0;
stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
dev->speed, is_input, 1, maxp));
hs_transfers = max (1u, (maxp + 187) / 188);
if (is_input) {
u32 tmp;
addr |= 1 << 31;
stream->c_usecs = stream->usecs;
stream->usecs = HS_USECS_ISO (1);
stream->ps.c_usecs = stream->ps.usecs;
stream->ps.usecs = HS_USECS_ISO(1);
stream->raw_mask = 1;
/* c-mask as specified in USB 2.0 11.18.4 3.c */
@@ -1054,18 +1054,20 @@ iso_stream_init (
stream->raw_mask |= tmp << (8 + 2);
} else
stream->raw_mask = smask_out [hs_transfers - 1];
bandwidth = stream->usecs + stream->c_usecs;
bandwidth /= interval;
stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
interval;
stream->uperiod = interval << 3;
stream->ps.period = interval;
/* stream->splits gets created from raw_mask later */
stream->address = cpu_to_hc32(ehci, addr);
}
stream->bandwidth = bandwidth;
stream->udev = dev;
stream->ps.udev = dev;
stream->ps.ep = urb->ep;
stream->bEndpointAddress = is_input | epnum;
stream->interval = interval;
stream->maxp = maxp;
}
@@ -1090,9 +1092,7 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
stream = iso_stream_alloc(GFP_ATOMIC);
if (likely (stream != NULL)) {
ep->hcpriv = stream;
stream->ep = ep;
iso_stream_init(ehci, stream, urb->dev, urb->pipe,
urb->interval);
iso_stream_init(ehci, stream, urb);
}
/* if dev->ep [epnum] is a QH, hw is set */
@@ -1137,7 +1137,7 @@ itd_sched_init(
dma_addr_t dma = urb->transfer_dma;
/* how many uframes are needed for these transfers */
iso_sched->span = urb->number_of_packets * stream->interval;
iso_sched->span = urb->number_of_packets * stream->uperiod;
/* figure out per-uframe itd fields that we'll need later
* when we fit new itds into the schedule.
@@ -1304,14 +1304,14 @@ sitd_slot_ok (
*/
uf = uframe & 7;
if (!tt_available(ehci, period_uframes >> 3,
stream->udev, frame, uf, stream->tt_usecs))
stream->ps.udev, frame, uf, stream->ps.tt_usecs))
return 0;
#else
/* tt must be idle for start(s), any gap, and csplit.
* assume scheduling slop leaves 10+% for control/bulk.
*/
if (!tt_no_collision(ehci, period_uframes >> 3,
stream->udev, frame, mask))
stream->ps.udev, frame, mask))
return 0;
#endif
@@ -1325,16 +1325,17 @@ sitd_slot_ok (
uf = uframe & 7;
/* check starts (OUT uses more than one) */
max_used = ehci->uframe_periodic_max - stream->usecs;
max_used = ehci->uframe_periodic_max - stream->ps.usecs;
for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
if (periodic_usecs (ehci, frame, uf) > max_used)
return 0;
}
/* for IN, check CSPLIT */
if (stream->c_usecs) {
if (stream->ps.c_usecs) {
uf = uframe & 7;
max_used = ehci->uframe_periodic_max - stream->c_usecs;
max_used = ehci->uframe_periodic_max -
stream->ps.c_usecs;
do {
tmp = 1 << uf;
tmp <<= 8;
@@ -1428,7 +1429,7 @@ iso_stream_schedule (
/* check schedule: enough space? */
if (stream->highspeed) {
if (itd_slot_ok(ehci, mod, start,
stream->usecs, period))
stream->ps.usecs, period))
done = 1;
} else {
if ((start % 8) >= 6)
@@ -1668,7 +1669,7 @@ static void itd_link_urb(
itd_patch(ehci, itd, iso_sched, packet, uframe);
next_uframe += stream->interval;
next_uframe += stream->uperiod;
next_uframe &= mod - 1;
packet++;
@@ -1808,9 +1809,9 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
ehci_dbg (ehci, "can't get iso stream\n");
return -ENOMEM;
}
if (unlikely (urb->interval != stream->interval)) {
if (unlikely(urb->interval != stream->uperiod)) {
ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
stream->interval, urb->interval);
stream->uperiod, urb->interval);
goto done;
}
@@ -1875,7 +1876,7 @@ sitd_sched_init(
dma_addr_t dma = urb->transfer_dma;
/* how many frames are needed for these transfers */
iso_sched->span = urb->number_of_packets * stream->interval;
iso_sched->span = urb->number_of_packets * stream->ps.period;
/* figure out per-frame sitd fields that we'll need later
* when we fit new sitds into the schedule.
@@ -2069,7 +2070,7 @@ static void sitd_link_urb(
sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
sitd);
next_uframe += stream->interval << 3;
next_uframe += stream->uperiod;
}
stream->next_uframe = next_uframe & (mod - 1);
@@ -2188,9 +2189,9 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
ehci_dbg (ehci, "can't get iso stream\n");
return -ENOMEM;
}
if (urb->interval != stream->interval) {
if (urb->interval != stream->ps.period) {
ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
stream->interval, urb->interval);
stream->ps.period, urb->interval);
goto done;
}
......
@@ -54,6 +54,19 @@ struct ehci_stats {
unsigned long unlink;
};
/*
* Scheduling and budgeting information for periodic transfers, for both
* high-speed devices and full/low-speed devices lying behind a TT.
*/
struct ehci_per_sched {
struct usb_device *udev; /* access to the TT */
struct usb_host_endpoint *ep;
u16 tt_usecs; /* time on the FS/LS bus */
u16 period; /* actual period in frames */
u16 phase; /* actual phase, frame part */
u8 phase_uf; /* uframe part of the phase */
u8 usecs, c_usecs; /* times on the HS bus */
};
#define NO_FRAME 29999 /* frame not assigned yet */
/* ehci_hcd->lock guards shared data against other CPUs:
@@ -387,6 +400,7 @@ struct ehci_qh {
struct list_head intr_node; /* list of intr QHs */
struct ehci_qtd *dummy;
struct list_head unlink_node;
struct ehci_per_sched ps; /* scheduling info */
unsigned unlink_cycle;
@@ -400,15 +414,8 @@ struct ehci_qh {
u8 xacterrs; /* XactErr retry counter */
#define QH_XACTERR_MAX 32 /* XactErr retry limit */
/* periodic schedule info */
u8 usecs; /* intr bandwidth */
u8 gap_uf; /* uframes split/csplit gap */
u8 c_usecs; /* ... split completion bw */
u16 tt_usecs; /* tt downstream bandwidth */
unsigned short period; /* polling interval */
unsigned short start; /* where polling starts */
struct usb_device *dev; /* access to TT */
unsigned is_out:1; /* bulk or intr OUT */
unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
unsigned dequeue_during_giveback:1;
@@ -451,20 +458,16 @@ struct ehci_iso_stream {
u8 highspeed;
struct list_head td_list; /* queued itds/sitds */
struct list_head free_list; /* list of unused itds/sitds */
struct usb_device *udev;
struct usb_host_endpoint *ep;
/* output of (re)scheduling */
struct ehci_per_sched ps; /* scheduling info */
unsigned next_uframe;
__hc32 splits;
/* the rest is derived from the endpoint descriptor,
* trusting urb->interval == f(epdesc->bInterval) and
* including the extra info for hw_bufp[0..2]
*/
u8 usecs, c_usecs;
u16 interval;
u16 tt_usecs;
u16 uperiod; /* period in uframes */
u16 maxp;
u16 raw_mask;
unsigned bandwidth;