Commit 674f8438 authored by Mathias Nyman, committed by Greg Kroah-Hartman

xhci: split handling halted endpoints into two steps

Don't queue both a reset endpoint command and a set TR deq command at once
when handling a halted endpoint.

Split this into two steps:
initially queue only a reset endpoint command, and then, if needed, queue a
set TR deq command from the reset endpoint command handler.
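
To illustrate the intended ordering, a small standalone model of the flow
(an illustration only, not the driver code: the cancelled-TD list is reduced
to a single TD, the xHC to printf calls, and the model_* names are made up to
mirror the handlers touched below):

#include <stdio.h>
#include <stdbool.h>

enum td_cancel_status { TD_DIRTY, TD_HALTED, TD_CLEARING_CACHE, TD_CLEARED };

struct model_td {
        enum td_cancel_status cancel_status;
        bool hw_stopped_on_td;  /* xHC stopped with its dequeue pointer on this TD */
};

/* Step 1: endpoint halts -> park the TD on the cancelled list and queue
 * only a Reset Endpoint command (nothing else is queued yet).
 */
static void model_handle_halted_endpoint(struct model_td *td)
{
        td->cancel_status = TD_HALTED;
        printf("queue: Reset Endpoint command\n");
}

/* Step 2: Reset Endpoint completed -> invalidate cancelled TDs and queue a
 * Set TR Dequeue Pointer command only if the xHC is parked on the TD.
 */
static void model_handle_cmd_reset_ep(struct model_td *td)
{
        if (td->hw_stopped_on_td) {
                td->cancel_status = TD_CLEARING_CACHE;
                printf("queue: Set TR Dequeue Pointer command\n");
        } else {
                td->cancel_status = TD_CLEARED; /* TD already turned to no-op */
        }
}

/* Step 3: Set TR Deq completed -> the TD can finally be given back. */
static void model_handle_cmd_set_deq(struct model_td *td)
{
        if (td->cancel_status == TD_CLEARING_CACHE)
                td->cancel_status = TD_CLEARED;
}

int main(void)
{
        struct model_td td = { .cancel_status = TD_DIRTY, .hw_stopped_on_td = true };

        model_handle_halted_endpoint(&td); /* only one command in flight at a time */
        model_handle_cmd_reset_ep(&td);    /* second command queued from the handler */
        model_handle_cmd_set_deq(&td);

        printf("final TD state: %s\n",
               td.cancel_status == TD_CLEARED ? "TD_CLEARED" : "not cleared");
        return 0;
}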

Note: This removes the RESET_EP_QUIRK handling which was added in
commit ac9d8fe7 ("USB: xhci: Add quirk for Fresco Logic xHCI hardware.")

This quirk was added in 2009 for prototype xHCI hardware that was meant for
evaluation purposes only and should never have reached consumers.
This hardware could not handle two commands queued at once, and had
bad data in the output context after a reset endpoint command.

After this patch, two commands are no longer queued at once, so that
part is solved by this rewrite, but the workaround for bad data in the
output context, previously solved by issuing an extra configure endpoint
command, is bluntly removed.

Adding this workaround to the new rewrite just adds complexity, and I
think it's time to let this quirk go.
Print a debug message instead.
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20210129130044.206855-22-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 7c6c334e
@@ -797,6 +797,30 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
     return 0;
 }
 
+/* Complete the cancelled URBs we unlinked from td_list. */
+static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
+{
+    struct xhci_ring *ring;
+    struct xhci_td *td, *tmp_td;
+
+    list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+                 cancelled_td_list) {
+        /*
+         * Doesn't matter what we pass for status, since the core will
+         * just overwrite it (because the URB has been unlinked).
+         */
+        ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+
+        if (td->cancel_status == TD_CLEARED)
+            xhci_td_cleanup(ep->xhci, td, ring, 0);
+
+        if (ep->xhci->xhc_state & XHCI_STATE_DYING)
+            return;
+    }
+}
+
 static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
                 unsigned int ep_index, enum xhci_ep_reset_type reset_type)
 {
@@ -834,15 +858,19 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
     ep->ep_state |= EP_HALTED;
 
+    /* add td to cancelled list and let reset ep handler take care of it */
+    if (reset_type == EP_HARD_RESET) {
+        ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
+        if (td && list_empty(&td->cancelled_td_list)) {
+            list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+            td->cancel_status = TD_HALTED;
+        }
+    }
+
     err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
     if (err)
         return;
 
-    if (reset_type == EP_HARD_RESET) {
-        ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
-        xhci_cleanup_stalled_ring(xhci, slot_id, ep->ep_index, stream_id,
-                      td);
-    }
-
     xhci_ring_cmd_db(xhci);
 }
@@ -851,16 +879,20 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
  * We have the xHCI lock, so nothing can modify this list until we drop it.
  * We're also in the event handler, so we can't get re-interrupted if another
  * Stop Endpoint command completes.
+ *
+ * only call this when ring is not in a running state
  */
-static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep,
-    struct xhci_dequeue_state *deq_state)
+static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
 {
     struct xhci_hcd *xhci;
     struct xhci_td *td = NULL;
     struct xhci_td *tmp_td = NULL;
+    struct xhci_td *cached_td = NULL;
     struct xhci_ring *ring;
+    struct xhci_dequeue_state deq_state;
     u64 hw_deq;
+    unsigned int slot_id = ep->vdev->slot_id;
 
     xhci = ep->xhci;
@@ -886,14 +918,28 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep,
         if (trb_in_td(xhci, td->start_seg, td->first_trb,
                   td->last_trb, hw_deq, false)) {
-            xhci_find_new_dequeue_state(xhci, ep->vdev->slot_id,
-                            ep->ep_index,
-                            td->urb->stream_id,
-                            td, deq_state);
+            switch (td->cancel_status) {
+            case TD_CLEARED: /* TD is already no-op */
+            case TD_CLEARING_CACHE: /* set TR deq command already queued */
+                break;
+            case TD_DIRTY: /* TD is cached, clear it */
+            case TD_HALTED:
+                /* FIXME stream case, several stopped rings */
+                cached_td = td;
+                break;
+            }
         } else {
             td_to_noop(xhci, ring, td, false);
+            td->cancel_status = TD_CLEARED;
         }
     }
+    if (cached_td) {
+        cached_td->cancel_status = TD_CLEARING_CACHE;
+        xhci_find_new_dequeue_state(xhci, slot_id, ep->ep_index,
+                        cached_td->urb->stream_id,
+                        cached_td, &deq_state);
+        xhci_queue_new_dequeue_state(xhci, slot_id, ep->ep_index,
+                        &deq_state);
+    }
     return 0;
 }
@@ -912,81 +958,32 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
                     union xhci_trb *trb)
 {
     unsigned int ep_index;
-    struct xhci_ring *ep_ring;
     struct xhci_virt_ep *ep;
-    struct xhci_td *cur_td = NULL;
-    struct xhci_td *last_unlinked_td;
     struct xhci_ep_ctx *ep_ctx;
-    struct xhci_virt_device *vdev;
-    struct xhci_dequeue_state deq_state;
 
     if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
         if (!xhci->devs[slot_id])
-            xhci_warn(xhci, "Stop endpoint command "
-                  "completion for disabled slot %u\n",
-                  slot_id);
+            xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
+                  slot_id);
         return;
     }
 
-    memset(&deq_state, 0, sizeof(deq_state));
     ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 
     ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
     if (!ep)
         return;
 
-    vdev = ep->vdev;
-    ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
-    trace_xhci_handle_cmd_stop_ep(ep_ctx);
-
-    last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
-            struct xhci_td, cancelled_td_list);
-
-    if (list_empty(&ep->cancelled_td_list)) {
-        xhci_stop_watchdog_timer_in_irq(xhci, ep);
-        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
-        return;
-    }
-
-    xhci_invalidate_cancelled_tds(ep, &deq_state);
+    ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
+
+    trace_xhci_handle_cmd_stop_ep(ep_ctx);
+
+    /* will queue a set TR deq if stopped on a cancelled, uncleared TD */
+    xhci_invalidate_cancelled_tds(ep);
 
     xhci_stop_watchdog_timer_in_irq(xhci, ep);
-
-    /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
-    if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-        xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
-                         &deq_state);
-        xhci_ring_cmd_db(xhci);
-    } else {
-        /* Otherwise ring the doorbell(s) to restart queued transfers */
-        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
-    }
-
-    /*
-     * Drop the lock and complete the URBs in the cancelled TD list.
-     * New TDs to be cancelled might be added to the end of the list before
-     * we can complete all the URBs for the TDs we already unlinked.
-     * So stop when we've completed the URB for the last TD we unlinked.
-     */
-    do {
-        cur_td = list_first_entry(&ep->cancelled_td_list,
-                struct xhci_td, cancelled_td_list);
-        list_del_init(&cur_td->cancelled_td_list);
-
-        /* Doesn't matter what we pass for status, since the core will
-         * just overwrite it (because the URB has been unlinked).
-         */
-        ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
-        xhci_td_cleanup(xhci, cur_td, ep_ring, 0);
-
-        /* Stop processing the cancelled list if the watchdog timer is
-         * running.
-         */
-        if (xhci->xhc_state & XHCI_STATE_DYING)
-            return;
-    } while (cur_td != last_unlinked_td);
-
-    /* Return to the event handler with xhci->lock re-acquired */
+    /* Otherwise ring the doorbell(s) to restart queued transfers */
+    xhci_giveback_invalidated_tds(ep);
+    ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
 
 static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
@@ -1202,6 +1199,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
     struct xhci_virt_ep *ep;
     struct xhci_ep_ctx *ep_ctx;
     struct xhci_slot_ctx *slot_ctx;
+    struct xhci_td *td, *tmp_td;
 
     ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
     stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
@@ -1279,7 +1277,15 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
                 ep->queued_deq_seg, ep->queued_deq_ptr);
         }
     }
+    /* HW cached TDs cleared from cache, give them back */
+    list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+                 cancelled_td_list) {
+        ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+        if (td->cancel_status == TD_CLEARING_CACHE) {
+            td->cancel_status = TD_CLEARED;
+            xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+        }
+    }
 cleanup:
     ep->ep_state &= ~SET_DEQ_PENDING;
     ep->queued_deq_seg = NULL;
@@ -1309,27 +1315,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
         xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
                 "Ignoring reset ep completion code of %u", cmd_comp_code);
 
-    /* HW with the reset endpoint quirk needs to have a configure endpoint
-     * command complete before the endpoint can be used.  Queue that here
-     * because the HW can't handle two commands being queued in a row.
-     */
-    if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
-        struct xhci_command *command;
-
-        command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
-        if (!command)
-            return;
-
-        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
-                "Queueing configure endpoint command");
-        xhci_queue_configure_endpoint(xhci, command,
-                xhci->devs[slot_id]->in_ctx->dma, slot_id,
-                false);
-        xhci_ring_cmd_db(xhci);
-    } else {
-        /* Clear our internal halted state */
-        ep->ep_state &= ~EP_HALTED;
-    }
+    /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
+    xhci_invalidate_cancelled_tds(ep);
+
+    if (xhci->quirks & XHCI_RESET_EP_QUIRK)
+        xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw");
+
+    /* Clear our internal halted state */
+    ep->ep_state &= ~EP_HALTED;
+
+    xhci_giveback_invalidated_tds(ep);
 
     /* if this was a soft reset, then restart */
     if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
@@ -2070,7 +2064,9 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
             xhci_clear_hub_tt_buffer(xhci, td, ep);
 
         xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
                         EP_HARD_RESET);
-        return 0;
+
+        return 0; /* xhci_handle_halted_endpoint marked td cancelled */
     } else {
         /* Update ring dequeue pointer */
         ep_ring->dequeue = td->last_trb;
...
@@ -1440,15 +1440,6 @@ static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
     return 1 << (xhci_get_endpoint_index(desc) + 1);
 }
 
-/* Find the flag for this endpoint (for use in the control context). Use the
- * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
- * bit 1, etc.
- */
-static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
-{
-    return 1 << (ep_index + 1);
-}
-
 /* Compute the last valid endpoint context index. Basically, this is the
  * endpoint index plus one. For slot contexts with more than valid endpoint,
  * we find the most significant bit set in the added contexts flags.
@@ -1810,7 +1801,12 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
     for (; i < urb_priv->num_tds; i++) {
         td = &urb_priv->td[i];
-        list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+        /* TD can already be on cancelled list if ep halted on it */
+        if (list_empty(&td->cancelled_td_list)) {
+            td->cancel_status = TD_DIRTY;
+            list_add_tail(&td->cancelled_td_list,
+                      &ep->cancelled_td_list);
+        }
     }
 
     /* Queue a stop endpoint command, but only if this is
@@ -3119,84 +3115,6 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
     ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
 }
 
-static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
-        unsigned int slot_id, unsigned int ep_index,
-        struct xhci_dequeue_state *deq_state)
-{
-    struct xhci_input_control_ctx *ctrl_ctx;
-    struct xhci_container_ctx *in_ctx;
-    struct xhci_ep_ctx *ep_ctx;
-    u32 added_ctxs;
-    dma_addr_t addr;
-
-    in_ctx = xhci->devs[slot_id]->in_ctx;
-    ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
-    if (!ctrl_ctx) {
-        xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
-                __func__);
-        return;
-    }
-
-    xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
-            xhci->devs[slot_id]->out_ctx, ep_index);
-    ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
-    addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
-            deq_state->new_deq_ptr);
-    if (addr == 0) {
-        xhci_warn(xhci, "WARN Cannot submit config ep after "
-                "reset ep command\n");
-        xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
-                deq_state->new_deq_seg,
-                deq_state->new_deq_ptr);
-        return;
-    }
-
-    ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
-    added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
-    xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
-            xhci->devs[slot_id]->out_ctx, ctrl_ctx,
-            added_ctxs, added_ctxs);
-}
-
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
-               unsigned int ep_index, unsigned int stream_id,
-               struct xhci_td *td)
-{
-    struct xhci_dequeue_state deq_state;
-
-    xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-            "Cleaning up stalled endpoint ring");
-    /* We need to move the HW's dequeue pointer past this TD,
-     * or it will attempt to resend it on the next doorbell ring.
-     */
-    xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td,
-                    &deq_state);
-
-    if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
-        return;
-
-    /* HW with the reset endpoint quirk will use the saved dequeue state to
-     * issue a configure endpoint command later.
-     */
-    if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
-        xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-                "Queueing new dequeue state");
-        xhci_queue_new_dequeue_state(xhci, slot_id,
-                ep_index, &deq_state);
-    } else {
-        /* Better hope no one uses the input context between now and the
-         * reset endpoint completion!
-         * XXX: No idea how this hardware will react when stream rings
-         * are enabled.
-         */
-        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
-                "Setting up input context for "
-                "configure endpoint command");
-        xhci_setup_input_ctx_for_quirk(xhci, slot_id,
-                ep_index, &deq_state);
-    }
-}
-
 static void xhci_endpoint_disable(struct usb_hcd *hcd,
         struct usb_host_endpoint *host_ep)
 {
...
@@ -1539,10 +1539,18 @@ struct xhci_segment {
     unsigned int        bounce_len;
 };
 
+enum xhci_cancelled_td_status {
+    TD_DIRTY = 0,
+    TD_HALTED,
+    TD_CLEARING_CACHE,
+    TD_CLEARED,
+};
+
 struct xhci_td {
     struct list_head    td_list;
     struct list_head    cancelled_td_list;
     int            status;
+    enum xhci_cancelled_td_status    cancel_status;
     struct urb        *urb;
     struct xhci_segment    *start_seg;
     union xhci_trb        *first_trb;
...