Commit 79b8094f authored by Lu Baolu's avatar Lu Baolu Committed by Greg Kroah-Hartman

xhci: xHCI 1.1: Contiguous Frame ID Capability (CFC)

If the Contiguous Frame ID Capability is supported (CFC = 1),
then the xHC shall match the Frame ID in every Isoch TD with
SIA = 0 against the Frame Index of the MFINDEX register. This
rule ensures resynchronization of Isoch TDs even if some are
dropped due to Missed Service Errors or Stopping the endpoint.

This patch enables xHCI driver to support CFC by calculating
and setting the Frame ID field of an Isoch TRB.

[made some dbg messages checkpatch friendly -Mathias]
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 4758dcd1
...@@ -99,6 +99,8 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci) ...@@ -99,6 +99,8 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp); xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
xhci_dbg(xhci, " HC generates %s bit addresses\n", xhci_dbg(xhci, " HC generates %s bit addresses\n",
HCC_64BIT_ADDR(temp) ? "64" : "32"); HCC_64BIT_ADDR(temp) ? "64" : "32");
xhci_dbg(xhci, " HC %s Contiguous Frame ID Capability\n",
HCC_CFC(temp) ? "has" : "hasn't");
/* FIXME */ /* FIXME */
xhci_dbg(xhci, " FIXME: more HCCPARAMS debugging\n"); xhci_dbg(xhci, " FIXME: more HCCPARAMS debugging\n");
......
...@@ -3555,6 +3555,97 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, ...@@ -3555,6 +3555,97 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
} }
} }
/*
 * Calculate the Frame ID field of an isochronous TRB. The Frame ID
 * identifies the target frame that the interval associated with this
 * isochronous transfer descriptor will start on. Refer to section
 * 4.11.2.5 of the xHCI 1.1 specification.
 *
 * @xhci:  host controller state, used for HCSPARAMS2 and MFINDEX.
 * @urb:   the isochronous URB being queued; urb->start_frame may be
 *         updated when index == 0 (see below).
 * @index: index of the TD within the URB.
 *
 * Returns the actual frame id (0..2047) on success, a negative value
 * on error (frame outside the valid scheduling window).
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
		struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	/*
	 * For LS/FS devices urb->start_frame is already in frames; for
	 * HS/SS it is in microframes, so shift down by 3 to get frames.
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 *
	 * If bit [3] of IST is cleared to '0', software can add a TRB no
	 * later than IST[2:0] Microframes before that TRB is scheduled to
	 * be executed.
	 * If bit [3] of IST is set to '1', software can add a TRB no later
	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;

	/* Software shall not schedule an Isoch TD with a Frame ID value that
	 * is less than the Start Frame ID or greater than the End Frame ID,
	 * where:
	 *
	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
	 *
	 * Both the End Frame ID and Start Frame ID values are calculated
	 * in microframes. When software determines the valid Frame ID value,
	 * the End Frame ID value should be rounded down to the nearest Frame
	 * boundary, and the Start Frame ID value should be rounded up to the
	 * nearest Frame boundary.
	 */
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	start_frame_id = roundup(current_frame_id + ist + 1, 8);
	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

	/* Reduce everything to 11-bit frame numbers (mod 2048). */
	start_frame &= 0x7ff;
	start_frame_id = (start_frame_id >> 3) & 0x7ff;
	end_frame_id = (end_frame_id >> 3) & 0x7ff;

	/*
	 * Log the MFINDEX value that was actually latched above; re-reading
	 * the register here could report a value that has since drifted
	 * from the one used for the window calculation.
	 */
	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
		 __func__, index, current_frame_id,
		 start_frame_id, end_frame_id, start_frame);

	/*
	 * Validate start_frame against the [start_frame_id, end_frame_id]
	 * window; the window may wrap around the 2048-frame boundary.
	 */
	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
				start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if ((start_frame > end_frame_id &&
				start_frame < start_frame_id))
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	/*
	 * For the first TD of the URB, fall back to the earliest valid
	 * frame and write the adjusted start frame back into the URB so
	 * the remaining TDs are computed relative to it.
	 */
	if (index == 0) {
		if (ret == -EINVAL || start_frame == start_frame_id) {
			start_frame = start_frame_id + 1;
			if (urb->dev->speed == USB_SPEED_LOW ||
					urb->dev->speed == USB_SPEED_FULL)
				urb->start_frame = start_frame;
			else
				urb->start_frame = start_frame << 3;
			ret = 0;
		}
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
			  start_frame, current_frame_id, index,
			  start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		return ret;
	}

	return start_frame;
}
/* This is for isoc transfer */ /* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index) struct urb *urb, int slot_id, unsigned int ep_index)
...@@ -3571,7 +3662,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3571,7 +3662,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
u64 start_addr, addr; u64 start_addr, addr;
int i, j; int i, j;
bool more_trbs_coming; bool more_trbs_coming;
struct xhci_virt_ep *xep;
xep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
num_tds = urb->number_of_packets; num_tds = urb->number_of_packets;
...@@ -3619,6 +3712,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3619,6 +3712,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td = urb_priv->td[i]; td = urb_priv->td[i];
for (j = 0; j < trbs_per_td; j++) { for (j = 0; j < trbs_per_td; j++) {
int frame_id = 0;
u32 remainder = 0; u32 remainder = 0;
field = 0; field = 0;
...@@ -3627,8 +3721,20 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3627,8 +3721,20 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
TRB_TLBPC(residue); TRB_TLBPC(residue);
/* Queue the isoc TRB */ /* Queue the isoc TRB */
field |= TRB_TYPE(TRB_ISOC); field |= TRB_TYPE(TRB_ISOC);
/* Assume URB_ISO_ASAP is set */
field |= TRB_SIA; /* Calculate Frame ID and SIA fields */
if (!(urb->transfer_flags & URB_ISO_ASAP) &&
HCC_CFC(xhci->hcc_params)) {
frame_id = xhci_get_isoc_frame_id(xhci,
urb,
i);
if (frame_id >= 0)
field |= TRB_FRAME_ID(frame_id);
else
field |= TRB_SIA;
} else
field |= TRB_SIA;
if (i == 0) { if (i == 0) {
if (start_cycle == 0) if (start_cycle == 0)
field |= 0x1; field |= 0x1;
...@@ -3704,6 +3810,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3704,6 +3810,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
} }
} }
/* store the next frame id */
if (HCC_CFC(xhci->hcc_params))
xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX) if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_disable(); usb_amd_quirk_pll_disable();
...@@ -3737,12 +3847,34 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3737,12 +3847,34 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return ret; return ret;
} }
/*
 * Return nonzero when the endpoint's transfer ring still has queued work
 * the controller has not consumed: the endpoint context is in the Running
 * state and the hardware dequeue pointer has not yet caught up with the
 * software enqueue pointer.
 */
static int ep_ring_is_processing(struct xhci_hcd *xhci,
		int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev = xhci->devs[slot_id];
	struct xhci_virt_ep *xep = &xdev->eps[ep_index];
	struct xhci_ring *ring = xep->ring;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t hw_deq, sw_enq;

	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	/* A stopped/halted/disabled endpoint is not processing anything. */
	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING)
		return 0;

	/* Strip the cycle bit before comparing DMA addresses. */
	hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
	sw_enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);

	return hw_deq != sw_enq;
}
/* /*
* Check transfer ring to guarantee there is enough room for the urb. * Check transfer ring to guarantee there is enough room for the urb.
* Update ISO URB start_frame and interval. * Update ISO URB start_frame and interval.
* Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
* update the urb->start_frame by now. * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
* Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input. * Contiguous Frame ID is not supported by HC.
*/ */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index) struct urb *urb, int slot_id, unsigned int ep_index)
...@@ -3755,8 +3887,11 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3755,8 +3887,11 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
int ep_interval; int ep_interval;
int num_tds, num_trbs, i; int num_tds, num_trbs, i;
int ret; int ret;
struct xhci_virt_ep *xep;
int ist;
xdev = xhci->devs[slot_id]; xdev = xhci->devs[slot_id];
xep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = xdev->eps[ep_index].ring; ep_ring = xdev->eps[ep_index].ring;
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
...@@ -3773,14 +3908,10 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3773,14 +3908,10 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
if (ret) if (ret)
return ret; return ret;
start_frame = readl(&xhci->run_regs->microframe_index); /*
start_frame &= 0x3fff; * Check interval value. This should be done before we start to
* calculate the start frame value.
urb->start_frame = start_frame; */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
urb->start_frame >>= 3;
xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
ep_interval = urb->interval; ep_interval = urb->interval;
/* Convert to microframes */ /* Convert to microframes */
...@@ -3801,6 +3932,40 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3801,6 +3932,40 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
urb->dev->speed == USB_SPEED_FULL) urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8; urb->interval /= 8;
} }
/* Calculate the start frame and put it in urb->start_frame. */
if (HCC_CFC(xhci->hcc_params) &&
ep_ring_is_processing(xhci, slot_id, ep_index)) {
urb->start_frame = xep->next_frame_id;
goto skip_start_over;
}
start_frame = readl(&xhci->run_regs->microframe_index);
start_frame &= 0x3fff;
/*
* Round up to the next frame and consider the time before trb really
* gets scheduled by hardware.
*/
ist = HCS_IST(xhci->hcs_params2) & 0x7;
if (HCS_IST(xhci->hcs_params2) & (1 << 3))
ist <<= 3;
start_frame += ist + XHCI_CFC_DELAY;
start_frame = roundup(start_frame, 8);
/*
* Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
* is greater than 8 microframes.
*/
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL) {
start_frame = roundup(start_frame, urb->interval << 3);
urb->start_frame = start_frame >> 3;
} else {
start_frame = roundup(start_frame, urb->interval);
urb->start_frame = start_frame;
}
skip_start_over:
ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free; ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index); return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
......
...@@ -119,6 +119,8 @@ struct xhci_cap_regs { ...@@ -119,6 +119,8 @@ struct xhci_cap_regs {
#define HCC_LTC(p) ((p) & (1 << 6)) #define HCC_LTC(p) ((p) & (1 << 6))
/* true: no secondary Stream ID Support */ /* true: no secondary Stream ID Support */
#define HCC_NSS(p) ((p) & (1 << 7)) #define HCC_NSS(p) ((p) & (1 << 7))
/* true: HC has Contiguous Frame ID Capability */
#define HCC_CFC(p) ((p) & (1 << 11))
/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */ /* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1)) #define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
/* Extended Capabilities pointer from PCI base - section 5.3.6 */ /* Extended Capabilities pointer from PCI base - section 5.3.6 */
...@@ -891,6 +893,8 @@ struct xhci_virt_ep { ...@@ -891,6 +893,8 @@ struct xhci_virt_ep {
/* Bandwidth checking storage */ /* Bandwidth checking storage */
struct xhci_bw_info bw_info; struct xhci_bw_info bw_info;
struct list_head bw_endpoint_list; struct list_head bw_endpoint_list;
/* Isoch Frame ID checking storage */
int next_frame_id;
}; };
enum xhci_overhead_type { enum xhci_overhead_type {
...@@ -1165,6 +1169,7 @@ enum xhci_setup_dev { ...@@ -1165,6 +1169,7 @@ enum xhci_setup_dev {
/* Isochronous TRB specific fields */ /* Isochronous TRB specific fields */
#define TRB_SIA (1<<31) #define TRB_SIA (1<<31)
#define TRB_FRAME_ID(p) (((p) & 0x7ff) << 20)
struct xhci_generic_trb { struct xhci_generic_trb {
__le32 field[4]; __le32 field[4];
...@@ -1601,6 +1606,8 @@ struct xhci_driver_overrides { ...@@ -1601,6 +1606,8 @@ struct xhci_driver_overrides {
int (*start)(struct usb_hcd *hcd); int (*start)(struct usb_hcd *hcd);
}; };
#define XHCI_CFC_DELAY 10
/* convert between an HCD pointer and the corresponding EHCI_HCD */ /* convert between an HCD pointer and the corresponding EHCI_HCD */
static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd) static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment