Commit 416b0d26 authored by Mathias Nyman, committed by Ben Hutchings

xhci: rework cycle bit checking for new dequeue pointers

commit 365038d8 upstream.

When we manually need to move the TR dequeue pointer we need to set the
correct cycle bit as well. Previously we used the trb pointer from the
last event received as a base, but this was changed in
commit 1f81b6d2 ("usb: xhci: Prefer endpoint context dequeue pointer")
to use the dequeue pointer from the endpoint context instead.

It turns out some Asmedia controllers advance the dequeue pointer
stored in the endpoint context past the event triggering TRB, and
this messed up the way the cycle bit was calculated.

Instead of adding a quirk or complicating the already hard to follow cycle bit
code, the whole cycle bit calculation is now simplified and adapted to handle
event and endpoint context dequeue pointer differences.

Fixes: 1f81b6d2 ("usb: xhci: Prefer endpoint context dequeue pointer")
Reported-by: Maciej Puzio <mx34567@gmail.com>
Reported-by: Evan Langlois <uudruid74@gmail.com>
Reviewed-by: Julius Werner <jwerner@chromium.org>
Tested-by: Maciej Puzio <mx34567@gmail.com>
Tested-by: Evan Langlois <uudruid74@gmail.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
[bwh: Backported to 3.2:
 - Debug logging in xhci_find_new_dequeue_state() is slightly different
 - Don't delete find_trb_seg(); it's still needed by xhci_cmd_to_noop()]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent a1724533
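
As a rough, standalone illustration of the new approach (not driver code; names such as struct trb, find_new_dequeue and RING_SIZE are made up for this sketch), the model below walks a ring once from the software dequeue pointer: it picks up the cycle state when it reaches the TRB matching the hardware dequeue address, notes when it passes the TD's last TRB, and toggles the cycle state at link TRBs that have the toggle bit set, mirroring the do/while loop in the diff that follows.

/*
 * Standalone model of the reworked cycle-bit search (illustrative only;
 * these are not xHCI driver types or functions).
 */
#include <stdbool.h>
#include <stdio.h>

struct trb {
	bool is_link;		/* link TRB chaining to the next segment */
	bool link_toggle;	/* link TRB carries the cycle-toggle bit */
};

#define RING_SIZE 8

/*
 * Walk the ring from sw_dequeue until both the hardware dequeue TRB and
 * the TD's last TRB have been seen.  Returns the index of the new dequeue
 * TRB and writes the cycle state to use there, or -1 if the walk wraps.
 */
static int find_new_dequeue(const struct trb ring[RING_SIZE],
			    int sw_dequeue, int hw_dequeue, int hw_cycle,
			    int last_trb, int *new_cycle)
{
	bool cycle_found = false, td_last_trb_found = false;
	int cycle = hw_cycle;
	int i = sw_dequeue;

	do {
		/* Cycle state is known once the HW dequeue TRB is reached. */
		if (!cycle_found && i == hw_dequeue) {
			cycle_found = true;
			if (td_last_trb_found)
				break;	/* HW already moved past last_trb */
		}
		if (i == last_trb)
			td_last_trb_found = true;

		/* Only link TRBs with the toggle bit flip the cycle state. */
		if (cycle_found && ring[i].is_link && ring[i].link_toggle)
			cycle ^= 1;

		i = (i + 1) % RING_SIZE;
		if (i == sw_dequeue)	/* wrapped around: search failed */
			return -1;
	} while (!cycle_found || !td_last_trb_found);

	*new_cycle = cycle;
	return i;
}

int main(void)
{
	/* One-segment ring whose final TRB is a link TRB with the toggle bit. */
	struct trb ring[RING_SIZE] = {
		[RING_SIZE - 1] = { .is_link = true, .link_toggle = true },
	};
	int cycle;

	/* Asmedia-style case: HW dequeue (5) already past the TD's last TRB (3). */
	int next = find_new_dequeue(ring, 2, 5, 1, 3, &cycle);

	printf("new dequeue index %d, cycle state %d\n", next, cycle);
	return 0;
}

In the Asmedia case described above, the hardware dequeue pointer is reached only after last_trb, so the loop breaks right at the hardware position and keeps its cycle state unchanged; in the normal case it stops at the TRB after last_trb, with the cycle state toggled for any link TRBs crossed along the way.
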
@@ -572,9 +572,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	struct xhci_virt_device *dev = xhci->devs[slot_id];
 	struct xhci_virt_ep *ep = &dev->eps[ep_index];
 	struct xhci_ring *ep_ring;
-	struct xhci_generic_trb *trb;
+	struct xhci_segment *new_seg;
+	union xhci_trb *new_deq;
 	dma_addr_t addr;
 	u64 hw_dequeue;
+	bool cycle_found = false;
+	bool td_last_trb_found = false;
 
 	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
 			ep_index, stream_id);
@@ -598,44 +601,45 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		hw_dequeue = le64_to_cpu(ep_ctx->deq);
 	}
 
-	/* Find virtual address and segment of hardware dequeue pointer */
-	state->new_deq_seg = ep_ring->deq_seg;
-	state->new_deq_ptr = ep_ring->dequeue;
-	while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
-			!= (dma_addr_t)(hw_dequeue & ~0xf)) {
-		next_trb(xhci, ep_ring, &state->new_deq_seg,
-					&state->new_deq_ptr);
-		if (state->new_deq_ptr == ep_ring->dequeue) {
-			WARN_ON(1);
-			return;
-		}
-	}
+	new_seg = ep_ring->deq_seg;
+	new_deq = ep_ring->dequeue;
+	state->new_cycle_state = hw_dequeue & 0x1;
+
 	/*
-	 * Find cycle state for last_trb, starting at old cycle state of
-	 * hw_dequeue. If there is only one segment ring, find_trb_seg() will
-	 * return immediately and cannot toggle the cycle state if this search
-	 * wraps around, so add one more toggle manually in that case.
+	 * We want to find the pointer, segment and cycle state of the new trb
+	 * (the one after current TD's last_trb). We know the cycle state at
+	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
+	 * found.
 	 */
-	state->new_cycle_state = hw_dequeue & 0x1;
-	if (ep_ring->first_seg == ep_ring->first_seg->next &&
-			cur_td->last_trb < state->new_deq_ptr)
-		state->new_cycle_state ^= 0x1;
+	do {
+		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
+		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
+			cycle_found = true;
+			if (td_last_trb_found)
+				break;
+		}
+		if (new_deq == cur_td->last_trb)
+			td_last_trb_found = true;
 
-	state->new_deq_ptr = cur_td->last_trb;
-	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
-	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
-			state->new_deq_ptr, &state->new_cycle_state);
-	if (!state->new_deq_seg) {
-		WARN_ON(1);
-		return;
-	}
+		if (cycle_found &&
+		    TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
+		    new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
+			state->new_cycle_state ^= 0x1;
+
+		next_trb(xhci, ep_ring, &new_seg, &new_deq);
+
+		/* Search wrapped around, bail out */
+		if (new_deq == ep->ring->dequeue) {
+			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
+			state->new_deq_seg = NULL;
+			state->new_deq_ptr = NULL;
+			return;
+		}
+
+	} while (!cycle_found || !td_last_trb_found);
 
-	/* Increment to find next TRB after last_trb. Cycle if appropriate. */
-	trb = &state->new_deq_ptr->generic;
-	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
-	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
-		state->new_cycle_state ^= 0x1;
-	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
+	state->new_deq_seg = new_seg;
+	state->new_deq_ptr = new_deq;
 
 	/* Don't update the ring cycle state for the producer (us). */
 	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
@@ -2819,6 +2819,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 			ep_index, ep->stopped_stream, ep->stopped_td,
 			&deq_state);
 
+	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
+		return;
+
 	/* HW with the reset endpoint quirk will use the saved dequeue state to
 	 * issue a configure endpoint command later.
 	 */