Commit dbc26546 authored by Greg Kroah-Hartman

Merge branch 'for-usb-next' of git+ssh://master.kernel.org/pub/scm/linux/kernel/git/sarah/xhci into usb-next

* 'for-usb-next' of git+ssh://master.kernel.org/pub/scm/linux/kernel/git/sarah/xhci:
  xhci 1.0: Set transfer burst last packet count field.
  xhci 1.0: Set transfer burst count field.
  xhci 1.0: Update TD size field format.
  xhci 1.0: Only interrupt on short packet for IN EPs.
  xhci: Remove sparse warning about cmd_status.
  usbcore: warm reset USB3 port in SS.Inactive state
  usbcore: Refine USB3.0 device suspend and resume
  xHCI: report USB3.0 portstatus comply with USB3.0 specification
  xHCI: Set link state support
  xHCI: Clear link state change support
  xHCI: warm reset support
  usb/ch9: use proper endianess for wBytesPerInterval
  xhci: Remove recursive call to xhci_handle_event
  xhci: Add an assertion to check for virt_dev=0 bug.
  xhci: Add rmb() between reading event validity & event data access.
  xhci: Make xHCI driver endian-safe
parents 71a9f9d2 b61d378f
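
The diffs below share one recurring pattern, introduced mainly by "xhci: Make xHCI driver endian-safe": every field the controller reads or writes becomes __le32/__le64, each access converts with le32_to_cpu()/cpu_to_le32() (or the 64-bit variants), and register pointers become __le32 __iomem *. A minimal sketch of the idea, with hypothetical struct and bit names (only the byte-order accessors and rmb() are real kernel APIs); it also shows where the "Add rmb() between reading event validity & event data access" barrier fits:

#include <linux/types.h>
#include <asm/byteorder.h>	/* le32_to_cpu(), cpu_to_le32() */
#include <asm/barrier.h>	/* rmb(); header location varies by kernel version */

#define SKETCH_CYCLE	0x1	/* assumed stand-in for TRB_CYCLE */

struct sketch_trb {		/* hypothetical; mirrors the __le annotations below */
	__le64 buffer;
	__le32 status;
	__le32 control;
};

static int sketch_event_ready(struct sketch_trb *ev, u32 cycle_state)
{
	/* Convert once into a CPU-endian temporary; never test raw __le32 */
	if ((le32_to_cpu(ev->control) & SKETCH_CYCLE) != cycle_state)
		return 0;
	/* The rmb() commit: the cycle-bit read must complete before the
	 * reads of the rest of the event TRB are allowed to happen. */
	rmb();
	return 1;
}

static void sketch_set_cycle(struct sketch_trb *trb)
{
	trb->control |= cpu_to_le32(SKETCH_CYCLE);	/* swap on the way out */
}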
@@ -129,7 +129,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 		max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1);
 	else
 		max_tx = 999999;
-	if (desc->wBytesPerInterval > max_tx) {
+	if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) {
 		dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in "
 				"config %d interface %d altsetting %d ep %d: "
 				"setting to %d\n",
...
@@ -379,15 +379,6 @@ static int hub_port_status(struct usb_hub *hub, int port1,
 		*status = le16_to_cpu(hub->status->port.wPortStatus);
 		*change = le16_to_cpu(hub->status->port.wPortChange);
 
-		if ((hub->hdev->parent != NULL) &&
-				hub_is_superspeed(hub->hdev)) {
-			/* Translate the USB 3 port status */
-			u16 tmp = *status & USB_SS_PORT_STAT_MASK;
-			if (*status & USB_SS_PORT_STAT_POWER)
-				tmp |= USB_PORT_STAT_POWER;
-			*status = tmp;
-		}
-
 		ret = 0;
 	}
 	mutex_unlock(&hub->status_mutex);
@@ -2160,11 +2151,76 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
 	return status;
 }
 
+/* Warm reset a USB3 protocol port */
+static int hub_port_warm_reset(struct usb_hub *hub, int port)
+{
+	int ret;
+	u16 portstatus, portchange;
+
+	if (!hub_is_superspeed(hub->hdev)) {
+		dev_err(hub->intfdev, "only USB3 hub support warm reset\n");
+		return -EINVAL;
+	}
+
+	/* Warm reset the port */
+	ret = set_port_feature(hub->hdev,
+				port, USB_PORT_FEAT_BH_PORT_RESET);
+	if (ret) {
+		dev_err(hub->intfdev, "cannot warm reset port %d\n", port);
+		return ret;
+	}
+
+	msleep(20);
+	ret = hub_port_status(hub, port, &portstatus, &portchange);
+
+	if (portchange & USB_PORT_STAT_C_RESET)
+		clear_port_feature(hub->hdev, port, USB_PORT_FEAT_C_RESET);
+
+	if (portchange & USB_PORT_STAT_C_BH_RESET)
+		clear_port_feature(hub->hdev, port,
+					USB_PORT_FEAT_C_BH_PORT_RESET);
+
+	if (portchange & USB_PORT_STAT_C_LINK_STATE)
+		clear_port_feature(hub->hdev, port,
+					USB_PORT_FEAT_C_PORT_LINK_STATE);
+
+	return ret;
+}
+
+/* Check if a port is power on */
+static int port_is_power_on(struct usb_hub *hub, unsigned portstatus)
+{
+	int ret = 0;
+
+	if (hub_is_superspeed(hub->hdev)) {
+		if (portstatus & USB_SS_PORT_STAT_POWER)
+			ret = 1;
+	} else {
+		if (portstatus & USB_PORT_STAT_POWER)
+			ret = 1;
+	}
+
+	return ret;
+}
+
 #ifdef CONFIG_PM
 
-#define MASK_BITS	(USB_PORT_STAT_POWER | USB_PORT_STAT_CONNECTION | \
-			USB_PORT_STAT_SUSPEND)
-#define WANT_BITS	(USB_PORT_STAT_POWER | USB_PORT_STAT_CONNECTION)
+/* Check if a port is suspended(USB2.0 port) or in U3 state(USB3.0 port) */
+static int port_is_suspended(struct usb_hub *hub, unsigned portstatus)
+{
+	int ret = 0;
+
+	if (hub_is_superspeed(hub->hdev)) {
+		if ((portstatus & USB_PORT_STAT_LINK_STATE)
+				== USB_SS_PORT_LS_U3)
+			ret = 1;
+	} else {
+		if (portstatus & USB_PORT_STAT_SUSPEND)
+			ret = 1;
+	}
+
+	return ret;
+}
 
 /* Determine whether the device on a port is ready for a normal resume,
  * is ready for a reset-resume, or should be disconnected.
@@ -2174,7 +2230,9 @@ static int check_port_resume_type(struct usb_device *udev,
 		int status, unsigned portchange, unsigned portstatus)
 {
 	/* Is the device still present? */
-	if (status || (portstatus & MASK_BITS) != WANT_BITS) {
+	if (status || port_is_suspended(hub, portstatus) ||
+			!port_is_power_on(hub, portstatus) ||
+			!(portstatus & USB_PORT_STAT_CONNECTION)) {
 		if (status >= 0)
 			status = -ENODEV;
 	}
@@ -2285,14 +2343,10 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 	}
 
 	/* see 7.1.7.6 */
-	/* Clear PORT_POWER if it's a USB3.0 device connected to USB 3.0
-	 * external hub.
-	 * FIXME: this is a temporary workaround to make the system able
-	 * to suspend/resume.
-	 */
-	if ((hub->hdev->parent != NULL) && hub_is_superspeed(hub->hdev))
-		status = clear_port_feature(hub->hdev, port1,
-						USB_PORT_FEAT_POWER);
+	if (hub_is_superspeed(hub->hdev))
+		status = set_port_feature(hub->hdev,
+				port1 | (USB_SS_PORT_LS_U3 << 3),
+				USB_PORT_FEAT_LINK_STATE);
 	else
 		status = set_port_feature(hub->hdev, port1,
 						USB_PORT_FEAT_SUSPEND);
@@ -2439,7 +2493,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 	/* Skip the initial Clear-Suspend step for a remote wakeup */
 	status = hub_port_status(hub, port1, &portstatus, &portchange);
-	if (status == 0 && !(portstatus & USB_PORT_STAT_SUSPEND))
+	if (status == 0 && !port_is_suspended(hub, portstatus))
 		goto SuspendCleared;
 
 	// dev_dbg(hub->intfdev, "resume port %d\n", port1);
@@ -2447,8 +2501,13 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 	set_bit(port1, hub->busy_bits);
 
 	/* see 7.1.7.7; affects power usage, but not budgeting */
-	status = clear_port_feature(hub->hdev,
-			port1, USB_PORT_FEAT_SUSPEND);
+	if (hub_is_superspeed(hub->hdev))
+		status = set_port_feature(hub->hdev,
+				port1 | (USB_SS_PORT_LS_U0 << 3),
+				USB_PORT_FEAT_LINK_STATE);
+	else
+		status = clear_port_feature(hub->hdev,
+				port1, USB_PORT_FEAT_SUSPEND);
 	if (status) {
 		dev_dbg(hub->intfdev, "can't resume port %d, status %d\n",
 			port1, status);
@@ -2470,9 +2529,15 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
  SuspendCleared:
 	if (status == 0) {
-		if (portchange & USB_PORT_STAT_C_SUSPEND)
-			clear_port_feature(hub->hdev, port1,
-					USB_PORT_FEAT_C_SUSPEND);
+		if (hub_is_superspeed(hub->hdev)) {
+			if (portchange & USB_PORT_STAT_C_LINK_STATE)
+				clear_port_feature(hub->hdev, port1,
+						USB_PORT_FEAT_C_PORT_LINK_STATE);
+		} else {
+			if (portchange & USB_PORT_STAT_C_SUSPEND)
+				clear_port_feature(hub->hdev, port1,
+						USB_PORT_FEAT_C_SUSPEND);
+		}
 	}
 
 	clear_bit(port1, hub->busy_bits);
@@ -3147,7 +3212,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
 	/* maybe switch power back on (e.g. root hub was reset) */
 	if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2
-			&& !(portstatus & USB_PORT_STAT_POWER))
+			&& !port_is_power_on(hub, portstatus))
 		set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
 
 	if (portstatus & USB_PORT_STAT_ENABLE)
@@ -3490,6 +3555,16 @@ static void hub_events(void)
 					USB_PORT_FEAT_C_PORT_CONFIG_ERROR);
 			}
 
+			/* Warm reset a USB3 protocol port if it's in
+			 * SS.Inactive state.
+			 */
+			if (hub_is_superspeed(hub->hdev) &&
+				(portstatus & USB_PORT_STAT_LINK_STATE)
+					== USB_SS_PORT_LS_SS_INACTIVE) {
+				dev_dbg(hub_dev, "warm reset port %d\n", i);
+				hub_port_warm_reset(hub, i);
+			}
+
 			if (connect_change)
 				hub_port_connect_change(hub, i,
 						portstatus, portchange);
...
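A side note on the wIndex encoding that the suspend/resume hunks above rely on and that the xhci_hub_control() hunks below decode: for SetPortFeature(USB_PORT_FEAT_LINK_STATE) the hub code passes port1 | (USB_SS_PORT_LS_U3 << 3). The USB_SS_PORT_LS_* values are already positioned as wPortStatus bits 8:5, so the extra shift by 3 parks the selector in wIndex bits 11:8, above the port number in the low byte. A small sketch under that reading (helper names are hypothetical):

#include <linux/types.h>

/* hypothetical helpers mirroring the two sides of the diff */
static inline u16 sketch_pack_link_state(int port1, u16 ls)
{
	return port1 | (ls << 3);	/* hub.c side: usb_port_suspend() */
}

static inline u16 sketch_unpack_link_state(u16 wIndex)
{
	/* xhci-hub.c side: link_state = (wIndex & 0xff00) >> 3;
	 * recovers the bits-8:5 USB_SS_PORT_LS_* encoding */
	return (wIndex & 0xff00) >> 3;
}

With the ch11.h value USB_SS_PORT_LS_U3 (0x0060), packing yields wIndex = port1 | 0x0300 and unpacking returns 0x0060 again.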
@@ -147,7 +147,7 @@ static void xhci_print_op_regs(struct xhci_hcd *xhci)
 static void xhci_print_ports(struct xhci_hcd *xhci)
 {
-	u32 __iomem *addr;
+	__le32 __iomem *addr;
 	int i, j;
 	int ports;
 	char *names[NUM_PORT_REGS] = {
@@ -253,27 +253,27 @@ void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
 void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 {
 	u64 address;
-	u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
+	u32 type = le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK;
 
 	switch (type) {
 	case TRB_TYPE(TRB_LINK):
 		xhci_dbg(xhci, "Link TRB:\n");
 		xhci_print_trb_offsets(xhci, trb);
-		address = trb->link.segment_ptr;
+		address = le64_to_cpu(trb->link.segment_ptr);
 		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
 		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
-				GET_INTR_TARGET(trb->link.intr_target));
+				GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target)));
 		xhci_dbg(xhci, "Cycle bit = %u\n",
-				(unsigned int) (trb->link.control & TRB_CYCLE));
+				(unsigned int) (le32_to_cpu(trb->link.control) & TRB_CYCLE));
 		xhci_dbg(xhci, "Toggle cycle bit = %u\n",
-				(unsigned int) (trb->link.control & LINK_TOGGLE));
+				(unsigned int) (le32_to_cpu(trb->link.control) & LINK_TOGGLE));
 		xhci_dbg(xhci, "No Snoop bit = %u\n",
-				(unsigned int) (trb->link.control & TRB_NO_SNOOP));
+				(unsigned int) (le32_to_cpu(trb->link.control) & TRB_NO_SNOOP));
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
-		address = trb->trans_event.buffer;
+		address = le64_to_cpu(trb->trans_event.buffer);
 		/*
 		 * FIXME: look at flags to figure out if it's an address or if
 		 * the data is directly in the buffer field.
@@ -281,11 +281,12 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
 		break;
 	case TRB_TYPE(TRB_COMPLETION):
-		address = trb->event_cmd.cmd_trb;
+		address = le64_to_cpu(trb->event_cmd.cmd_trb);
 		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
 		xhci_dbg(xhci, "Completion status = %u\n",
-				(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
-		xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
+				(unsigned int) GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
+		xhci_dbg(xhci, "Flags = 0x%x\n",
+				(unsigned int) le32_to_cpu(trb->event_cmd.flags));
 		break;
 	default:
 		xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
@@ -311,16 +312,16 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
 {
 	int i;
-	u32 addr = (u32) seg->dma;
+	u64 addr = seg->dma;
 	union xhci_trb *trb = seg->trbs;
 
 	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
 		trb = &seg->trbs[i];
-		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
-				lower_32_bits(trb->link.segment_ptr),
-				upper_32_bits(trb->link.segment_ptr),
-				(unsigned int) trb->link.intr_target,
-				(unsigned int) trb->link.control);
+		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr,
+				(u32)lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+				(u32)upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+				(unsigned int) le32_to_cpu(trb->link.intr_target),
+				(unsigned int) le32_to_cpu(trb->link.control));
 		addr += sizeof(*trb);
 	}
 }
@@ -391,18 +392,18 @@ void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 {
-	u32 addr = (u32) erst->erst_dma_addr;
+	u64 addr = erst->erst_dma_addr;
 	int i;
 	struct xhci_erst_entry *entry;
 
 	for (i = 0; i < erst->num_entries; ++i) {
 		entry = &erst->entries[i];
-		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
-				(unsigned int) addr,
-				lower_32_bits(entry->seg_addr),
-				upper_32_bits(entry->seg_addr),
-				(unsigned int) entry->seg_size,
-				(unsigned int) entry->rsvd);
+		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n",
+				addr,
+				lower_32_bits(le64_to_cpu(entry->seg_addr)),
+				upper_32_bits(le64_to_cpu(entry->seg_addr)),
+				(unsigned int) le32_to_cpu(entry->seg_size),
+				(unsigned int) le32_to_cpu(entry->rsvd));
 		addr += sizeof(*entry);
 	}
 }
@@ -436,7 +437,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
 {
 	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
 
-	switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
+	switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
 	case 0:
 		return "enabled/disabled";
 	case 1:
...
@@ -50,7 +50,7 @@ static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
 		temp |= 0x0008;
 	/* Bits 6:5 - no TTs in root ports */
 	/* Bit 7 - no port indicators */
-	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp);
+	desc->wHubCharacteristics = cpu_to_le16(temp);
 }
 
 /* Fill in the USB 2.0 roothub descriptor */
@@ -314,7 +314,7 @@ void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
 }
 
 static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
-		u16 wIndex, u32 __iomem *addr, u32 port_status)
+		u16 wIndex, __le32 __iomem *addr, u32 port_status)
 {
 	/* Don't allow the USB core to disable SuperSpeed ports. */
 	if (hcd->speed == HCD_USB3) {
@@ -331,7 +331,7 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
 }
 
 static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
-		u16 wIndex, u32 __iomem *addr, u32 port_status)
+		u16 wIndex, __le32 __iomem *addr, u32 port_status)
 {
 	char *port_change_bit;
 	u32 status;
@@ -341,6 +341,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
 		status = PORT_RC;
 		port_change_bit = "reset";
 		break;
+	case USB_PORT_FEAT_C_BH_PORT_RESET:
+		status = PORT_WRC;
+		port_change_bit = "warm(BH) reset";
+		break;
 	case USB_PORT_FEAT_C_CONNECTION:
 		status = PORT_CSC;
 		port_change_bit = "connect";
@@ -357,6 +361,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
 		status = PORT_PLC;
 		port_change_bit = "suspend/resume";
 		break;
+	case USB_PORT_FEAT_C_PORT_LINK_STATE:
+		status = PORT_PLC;
+		port_change_bit = "link state";
+		break;
 	default:
 		/* Should never happen */
 		return;
@@ -376,9 +384,10 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 	unsigned long flags;
 	u32 temp, temp1, status;
 	int retval = 0;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	int slot_id;
 	struct xhci_bus_state *bus_state;
+	u16 link_state = 0;
 
 	if (hcd->speed == HCD_USB3) {
 		ports = xhci->num_usb3_ports;
@@ -422,9 +431,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		}
 		xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp);
 
-		/* FIXME - should we return a port status value like the USB
-		 * 3.0 external hubs do?
-		 */
 		/* wPortChange bits */
 		if (temp & PORT_CSC)
 			status |= USB_PORT_STAT_C_CONNECTION << 16;
@@ -432,13 +438,21 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			status |= USB_PORT_STAT_C_ENABLE << 16;
 		if ((temp & PORT_OCC))
 			status |= USB_PORT_STAT_C_OVERCURRENT << 16;
-		/*
-		 * FIXME ignoring reset and USB 2.1/3.0 specific
-		 * changes
-		 */
-		if ((temp & PORT_PLS_MASK) == XDEV_U3
-				&& (temp & PORT_POWER))
-			status |= 1 << USB_PORT_FEAT_SUSPEND;
+		if ((temp & PORT_RC))
+			status |= USB_PORT_STAT_C_RESET << 16;
+		/* USB3.0 only */
+		if (hcd->speed == HCD_USB3) {
+			if ((temp & PORT_PLC))
+				status |= USB_PORT_STAT_C_LINK_STATE << 16;
+			if ((temp & PORT_WRC))
+				status |= USB_PORT_STAT_C_BH_RESET << 16;
+		}
+
+		if (hcd->speed != HCD_USB3) {
+			if ((temp & PORT_PLS_MASK) == XDEV_U3
+					&& (temp & PORT_POWER))
+				status |= USB_PORT_STAT_SUSPEND;
+		}
+
 		if ((temp & PORT_PLS_MASK) == XDEV_RESUME) {
 			if ((temp & PORT_RESET) || !(temp & PORT_PE))
 				goto error;
@@ -469,7 +483,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 				&& (temp & PORT_POWER)
 				&& (bus_state->suspended_ports & (1 << wIndex))) {
 			bus_state->suspended_ports &= ~(1 << wIndex);
-			bus_state->port_c_suspend |= 1 << wIndex;
+			if (hcd->speed != HCD_USB3)
+				bus_state->port_c_suspend |= 1 << wIndex;
 		}
 		if (temp & PORT_CONNECT) {
 			status |= USB_PORT_STAT_CONNECTION;
@@ -481,14 +496,28 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			status |= USB_PORT_STAT_OVERCURRENT;
 		if (temp & PORT_RESET)
 			status |= USB_PORT_STAT_RESET;
-		if (temp & PORT_POWER)
-			status |= USB_PORT_STAT_POWER;
+		if (temp & PORT_POWER) {
+			if (hcd->speed == HCD_USB3)
+				status |= USB_SS_PORT_STAT_POWER;
+			else
+				status |= USB_PORT_STAT_POWER;
+		}
+		/* Port Link State */
+		if (hcd->speed == HCD_USB3) {
+			/* resume state is a xHCI internal state.
+			 * Do not report it to usb core.
+			 */
+			if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
+				status |= (temp & PORT_PLS_MASK);
+		}
+
 		if (bus_state->port_c_suspend & (1 << wIndex))
 			status |= 1 << USB_PORT_FEAT_C_SUSPEND;
 		xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
 		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
 		break;
 	case SetPortFeature:
+		if (wValue == USB_PORT_FEAT_LINK_STATE)
+			link_state = (wIndex & 0xff00) >> 3;
 		wIndex &= 0xff;
 		if (!wIndex || wIndex > ports)
 			goto error;
@@ -537,6 +566,44 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			temp = xhci_readl(xhci, port_array[wIndex]);
 			bus_state->suspended_ports |= 1 << wIndex;
 			break;
+		case USB_PORT_FEAT_LINK_STATE:
+			temp = xhci_readl(xhci, port_array[wIndex]);
+			/* Software should not attempt to set
+			 * port link state above '5' (Rx.Detect) and the port
+			 * must be enabled.
+			 */
+			if ((temp & PORT_PE) == 0 ||
+				(link_state > USB_SS_PORT_LS_RX_DETECT)) {
+				xhci_warn(xhci, "Cannot set link state.\n");
+				goto error;
+			}
+
+			if (link_state == USB_SS_PORT_LS_U3) {
+				slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+						wIndex + 1);
+				if (slot_id) {
+					/* unlock to execute stop endpoint
+					 * commands */
+					spin_unlock_irqrestore(&xhci->lock,
+								flags);
+					xhci_stop_device(xhci, slot_id, 1);
+					spin_lock_irqsave(&xhci->lock, flags);
+				}
+			}
+
+			temp = xhci_port_state_to_neutral(temp);
+			temp &= ~PORT_PLS_MASK;
+			temp |= PORT_LINK_STROBE | link_state;
+			xhci_writel(xhci, temp, port_array[wIndex]);
+
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			msleep(20); /* wait device to enter */
+			spin_lock_irqsave(&xhci->lock, flags);
+
+			temp = xhci_readl(xhci, port_array[wIndex]);
+			if (link_state == USB_SS_PORT_LS_U3)
+				bus_state->suspended_ports |= 1 << wIndex;
+			break;
 		case USB_PORT_FEAT_POWER:
 			/*
 			 * Turn on ports, even if there isn't per-port switching.
@@ -557,6 +624,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			temp = xhci_readl(xhci, port_array[wIndex]);
 			xhci_dbg(xhci, "set port reset, actual port %d status = 0x%x\n", wIndex, temp);
 			break;
+		case USB_PORT_FEAT_BH_PORT_RESET:
+			temp |= PORT_WR;
+			xhci_writel(xhci, temp, port_array[wIndex]);
+
+			temp = xhci_readl(xhci, port_array[wIndex]);
+			break;
 		default:
 			goto error;
 		}
@@ -584,35 +657,27 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			if (temp & XDEV_U3) {
 				if ((temp & PORT_PE) == 0)
 					goto error;
-				if (DEV_SUPERSPEED(temp)) {
-					temp = xhci_port_state_to_neutral(temp);
-					temp &= ~PORT_PLS_MASK;
-					temp |= PORT_LINK_STROBE | XDEV_U0;
-					xhci_writel(xhci, temp,
-							port_array[wIndex]);
-					xhci_readl(xhci, port_array[wIndex]);
-				} else {
-					temp = xhci_port_state_to_neutral(temp);
-					temp &= ~PORT_PLS_MASK;
-					temp |= PORT_LINK_STROBE | XDEV_RESUME;
-					xhci_writel(xhci, temp,
-							port_array[wIndex]);
-					spin_unlock_irqrestore(&xhci->lock,
-							       flags);
-					msleep(20);
-					spin_lock_irqsave(&xhci->lock, flags);
-					temp = xhci_readl(xhci,
-							port_array[wIndex]);
-					temp = xhci_port_state_to_neutral(temp);
-					temp &= ~PORT_PLS_MASK;
-					temp |= PORT_LINK_STROBE | XDEV_U0;
-					xhci_writel(xhci, temp,
-							port_array[wIndex]);
-				}
-				bus_state->port_c_suspend |= 1 << wIndex;
+
+				temp = xhci_port_state_to_neutral(temp);
+				temp &= ~PORT_PLS_MASK;
+				temp |= PORT_LINK_STROBE | XDEV_RESUME;
+				xhci_writel(xhci, temp,
+						port_array[wIndex]);
+
+				spin_unlock_irqrestore(&xhci->lock,
+						       flags);
+				msleep(20);
+				spin_lock_irqsave(&xhci->lock, flags);
+
+				temp = xhci_readl(xhci,
+						port_array[wIndex]);
+				temp = xhci_port_state_to_neutral(temp);
+				temp &= ~PORT_PLS_MASK;
+				temp |= PORT_LINK_STROBE | XDEV_U0;
+				xhci_writel(xhci, temp,
+						port_array[wIndex]);
 			}
+			bus_state->port_c_suspend |= 1 << wIndex;
 
 			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
 					wIndex + 1);
@@ -625,9 +690,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		case USB_PORT_FEAT_C_SUSPEND:
 			bus_state->port_c_suspend &= ~(1 << wIndex);
 		case USB_PORT_FEAT_C_RESET:
+		case USB_PORT_FEAT_C_BH_PORT_RESET:
 		case USB_PORT_FEAT_C_CONNECTION:
 		case USB_PORT_FEAT_C_OVER_CURRENT:
 		case USB_PORT_FEAT_C_ENABLE:
+		case USB_PORT_FEAT_C_PORT_LINK_STATE:
 			xhci_clear_port_change_bit(xhci, wValue, wIndex,
 					port_array[wIndex], temp);
 			break;
@@ -664,7 +731,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
 	int i, retval;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	int ports;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	struct xhci_bus_state *bus_state;
 
 	if (hcd->speed == HCD_USB3) {
@@ -681,7 +748,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
 	memset(buf, 0, retval);
 	status = 0;
 
-	mask = PORT_CSC | PORT_PEC | PORT_OCC;
+	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC;
 
 	spin_lock_irqsave(&xhci->lock, flags);
 	/* For each port, did anything change?  If so, set that bit in buf. */
@@ -709,7 +776,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	int max_ports, port_index;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	struct xhci_bus_state *bus_state;
 	unsigned long flags;
@@ -779,7 +846,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
 		if (DEV_HIGHSPEED(t1)) {
 			/* enable remote wake up for USB 2.0 */
-			u32 __iomem *addr;
+			__le32 __iomem *addr;
 			u32 tmp;
 
 			/* Add one to the port status register address to get
@@ -801,7 +868,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	int max_ports, port_index;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	struct xhci_bus_state *bus_state;
 	u32 temp;
 	unsigned long flags;
@@ -875,7 +942,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
 		if (DEV_HIGHSPEED(temp)) {
 			/* disable remote wake up for USB 2.0 */
-			u32 __iomem *addr;
+			__le32 __iomem *addr;
 			u32 tmp;
 
 			/* Add one to the port status register address to get
...
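For reference, the GetPortStatus hunks above always hand the result back the same way: wPortStatus in the low 16 bits, wPortChange in the high 16, stored little-endian into a possibly unaligned 4-byte buffer. A standalone sketch of that idiom (the helper name is hypothetical; put_unaligned() and cpu_to_le32() are the real APIs):

#include <linux/types.h>
#include <asm/unaligned.h>	/* put_unaligned() */

static void sketch_fill_port_status(char *buf, u16 wPortStatus, u16 wPortChange)
{
	/* change bits are shifted into the high half, as in the "<< 16" above */
	u32 status = wPortStatus | ((u32)wPortChange << 16);

	put_unaligned(cpu_to_le32(status), (__le32 *)buf);
}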
@@ -89,16 +89,17 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.
+			segment_ptr = cpu_to_le64(next->dma);
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
-		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
+		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
 		val &= ~TRB_TYPE_BITMASK;
 		val |= TRB_TYPE(TRB_LINK);
 		/* Always set the chain bit with 0.95 hardware */
 		if (xhci_link_trb_quirk(xhci))
 			val |= TRB_CHAIN;
-		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 	}
 	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
 			(unsigned long long)prev->dma,
@@ -186,7 +187,8 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 	if (link_trbs) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
-		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
+		prev->trbs[TRBS_PER_SEGMENT-1].link.
+			control |= cpu_to_le32(LINK_TOGGLE);
 		xhci_dbg(xhci, "Wrote link toggle flag to"
 				" segment %p (virtual), 0x%llx (DMA)\n",
 				prev, (unsigned long long)prev->dma);
@@ -548,7 +550,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 		addr = cur_ring->first_seg->dma |
 			SCT_FOR_CTX(SCT_PRI_TR) |
 			cur_ring->cycle_state;
-		stream_info->stream_ctx_array[cur_stream].stream_ring = addr;
+		stream_info->stream_ctx_array[cur_stream].
+			stream_ring = cpu_to_le64(addr);
 		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
 				cur_stream, (unsigned long long) addr);
@@ -614,10 +617,10 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
 	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
 	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
 			1 << (max_primary_streams + 1));
-	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
-	ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams);
-	ep_ctx->ep_info |= EP_HAS_LSA;
-	ep_ctx->deq = stream_info->ctx_array_dma;
+	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
+	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
+				       | EP_HAS_LSA);
+	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
 }
@@ -630,10 +633,9 @@ void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
 		struct xhci_virt_ep *ep)
 {
 	dma_addr_t addr;
-	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
-	ep_ctx->ep_info &= ~EP_HAS_LSA;
+	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
 	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
-	ep_ctx->deq = addr | ep->ring->cycle_state;
+	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
 }
 
 /* Frees all stream contexts associated with the endpoint,
@@ -781,11 +783,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	dev->udev = udev;
 
 	/* Point to output device context in dcbaa. */
-	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
+	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 		 slot_id,
 		 &xhci->dcbaa->dev_context_ptrs[slot_id],
		 (unsigned long long) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
 
 	return 1;
 fail:
@@ -810,8 +812,9 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
 	 * configured device has reset, so all control transfers should have
 	 * been completed or cancelled before the reset.
 	 */
-	ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
-	ep0_ctx->deq |= ep_ring->cycle_state;
+	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
+							ep_ring->enqueue)
+				   | ep_ring->cycle_state);
 }
 
 /*
@@ -885,24 +888,22 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 
 	/* 2) New slot context and endpoint 0 context are valid*/
-	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
 
 	/* 3) Only the control endpoint is valid - one endpoint context */
-	slot_ctx->dev_info |= LAST_CTX(1);
-	slot_ctx->dev_info |= (u32) udev->route;
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | (u32) udev->route);
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_SS);
 		break;
 	case USB_SPEED_HIGH:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_HS);
 		break;
 	case USB_SPEED_FULL:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_FS);
 		break;
 	case USB_SPEED_LOW:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_LS);
 		break;
 	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -916,7 +917,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	port_num = xhci_find_real_port_number(xhci, udev);
 	if (!port_num)
 		return -EINVAL;
-	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(port_num);
+	slot_ctx->dev_info2 |= cpu_to_le32((u32) ROOT_HUB_PORT(port_num));
 	/* Set the port number in the virtual_device to the faked port number */
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
 			top_dev = top_dev->parent)
@@ -927,31 +928,31 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	/* Is this a LS/FS device under an external HS hub? */
 	if (udev->tt && udev->tt->hub->parent) {
-		slot_ctx->tt_info = udev->tt->hub->slot_id;
-		slot_ctx->tt_info |= udev->ttport << 8;
+		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
+						(udev->ttport << 8));
 		if (udev->tt->multi)
-			slot_ctx->dev_info |= DEV_MTT;
+			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
 	}
 	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
 	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
 
 	/* Step 4 - ring already allocated */
 	/* Step 5 */
-	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
+	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
 	/*
 	 * XXX: Not sure about wireless USB devices.
 	 */
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		ep0_ctx->ep_info2 |= MAX_PACKET(512);
+		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
 		break;
 	case USB_SPEED_HIGH:
 	/* USB core guesses at a 64-byte max packet first for FS devices */
 	case USB_SPEED_FULL:
-		ep0_ctx->ep_info2 |= MAX_PACKET(64);
+		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
 		break;
 	case USB_SPEED_LOW:
-		ep0_ctx->ep_info2 |= MAX_PACKET(8);
+		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
 		break;
 	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -962,12 +963,10 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 		BUG();
 	}
 	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
-	ep0_ctx->ep_info2 |= MAX_BURST(0);
-	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
+	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));
 
-	ep0_ctx->deq =
-		dev->eps[0].ring->first_seg->dma;
-	ep0_ctx->deq |= dev->eps[0].ring->cycle_state;
+	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
+				   dev->eps[0].ring->cycle_state);
 
 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
@@ -1131,10 +1130,10 @@ static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
 		return 0;
 
 	if (udev->speed == USB_SPEED_SUPER)
-		return ep->ss_ep_comp.wBytesPerInterval;
+		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
 
-	max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
-	max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+	max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
+	max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize) & 0x1800) >> 11;
 	/* A 0 in max burst means 1 transfer per ESIT */
 	return max_packet * (max_burst + 1);
 }
@@ -1183,10 +1182,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	}
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
-	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
+	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
 
-	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
-	ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
+	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
+				      | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
 
 	/* FIXME dig Mult and streams info out of ep companion desc */
@@ -1194,22 +1193,22 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 * error count = 0 means infinite retries.
 	 */
 	if (!usb_endpoint_xfer_isoc(&ep->desc))
-		ep_ctx->ep_info2 = ERROR_COUNT(3);
+		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
 	else
-		ep_ctx->ep_info2 = ERROR_COUNT(1);
+		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(1));
 
-	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
+	ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
 
 	/* Set the max packet size and max burst */
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		max_packet = ep->desc.wMaxPacketSize;
-		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+		max_packet = le16_to_cpu(ep->desc.wMaxPacketSize);
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
 		/* dig out max burst from ep companion desc */
 		max_packet = ep->ss_ep_comp.bMaxBurst;
 		if (!max_packet)
 			xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
-		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
 		break;
 	case USB_SPEED_HIGH:
 		/* bits 11:12 specify the number of additional transaction
@@ -1217,20 +1216,21 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		 */
 		if (usb_endpoint_xfer_isoc(&ep->desc) ||
 				usb_endpoint_xfer_int(&ep->desc)) {
-			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
-			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
+			max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize)
+				     & 0x1800) >> 11;
+			ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
 		}
 		/* Fall through */
 	case USB_SPEED_FULL:
 	case USB_SPEED_LOW:
-		max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
-		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+		max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
 		break;
 	default:
 		BUG();
 	}
 	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
-	ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
+	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
 
 	/*
 	 * XXX no idea how to calculate the average TRB buffer length for bulk
@@ -1247,7 +1247,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 * use Event Data TRBs, and we don't chain in a link TRB on short
 	 * transfers, we're basically dividing by 1.
 	 */
-	ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
+	ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
 
 	/* FIXME Debug endpoint context */
 	return 0;
@@ -1347,7 +1347,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	if (!xhci->scratchpad->sp_dma_buffers)
 		goto fail_sp4;
 
-	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
+	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
 	for (i = 0; i < num_sp; i++) {
 		dma_addr_t dma;
 		void *buf = pci_alloc_consistent(to_pci_dev(dev),
@@ -1724,7 +1724,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 }
 
 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
-		u32 __iomem *addr, u8 major_revision)
+		__le32 __iomem *addr, u8 major_revision)
 {
 	u32 temp, port_offset, port_count;
 	int i;
@@ -1789,7 +1789,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
  */
 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 {
-	u32 __iomem *addr;
+	__le32 __iomem *addr;
 	u32 offset;
 	unsigned int num_ports;
 	int i, port_index;
@@ -2042,8 +2042,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set ring base address and size for each segment table entry */
 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
 		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr = seg->dma;
-		entry->seg_size = TRBS_PER_SEGMENT;
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
 		entry->rsvd = 0;
 		seg = seg->next;
 	}
...
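One detail worth calling out in the xhci-mem.c hunks above: dequeue pointers are now built in a CPU-endian value first, with the ring's cycle state OR'd into bit 0, and byte-swapped exactly once with cpu_to_le64(), instead of two read-modify-writes on a live __le64. That works because TRB ring segments are at least 16-byte aligned, leaving the low address bits free; xHCI defines bit 0 of the endpoint-context dequeue pointer as the dequeue cycle state. A sketch under those assumptions (the helper name is hypothetical):

#include <linux/types.h>

/* mirrors ep_ctx->deq = cpu_to_le64(addr | ring->cycle_state) above */
static __le64 sketch_deq_ptr(dma_addr_t seg_dma, unsigned int cycle_state)
{
	/* seg_dma is 16-byte aligned, so bit 0 is free for the cycle state */
	return cpu_to_le64(seg_dma | cycle_state);
}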
@@ -100,7 +100,7 @@ static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
 			(seg->next == xhci->event_ring->first_seg);
 	else
-		return trb->link.control & LINK_TOGGLE;
+		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 }
 
 /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
@@ -113,13 +113,15 @@ static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	if (ring == xhci->event_ring)
 		return trb == &seg->trbs[TRBS_PER_SEGMENT];
 	else
-		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
+		return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
+			== TRB_TYPE(TRB_LINK);
 }
 
 static int enqueue_is_link_trb(struct xhci_ring *ring)
 {
 	struct xhci_link_trb *link = &ring->enqueue->link;
-	return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
+	return ((le32_to_cpu(link->control) & TRB_TYPE_BITMASK) ==
+		TRB_TYPE(TRB_LINK));
 }
 
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
@@ -197,7 +199,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	union xhci_trb *next;
 	unsigned long long addr;
 
-	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
+	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
 	next = ++(ring->enqueue);
 
 	ring->enq_updates++;
@@ -223,12 +225,14 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 			 * (which may mean the chain bit is cleared).
 			 */
 			if (!xhci_link_trb_quirk(xhci)) {
-				next->link.control &= ~TRB_CHAIN;
-				next->link.control |= chain;
+				next->link.control &=
+					cpu_to_le32(~TRB_CHAIN);
+				next->link.control |=
+					cpu_to_le32(chain);
 			}
 			/* Give this link TRB to the hardware */
 			wmb();
-			next->link.control ^= TRB_CYCLE;
+			next->link.control ^= cpu_to_le32(TRB_CYCLE);
 
 			/* Toggle the cycle bit after the last ring segment. */
 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
@@ -319,7 +323,7 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
 		unsigned int ep_index,
 		unsigned int stream_id)
 {
-	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 	unsigned int ep_state = ep->ep_state;
@@ -380,7 +384,7 @@ static struct xhci_segment *find_trb_seg(
 	while (cur_seg->trbs > trb ||
 			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
 		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-		if (generic_trb->field[3] & LINK_TOGGLE)
+		if (le32_to_cpu(generic_trb->field[3]) & LINK_TOGGLE)
 			*cycle_state ^= 0x1;
 		cur_seg = cur_seg->next;
 		if (cur_seg == start_seg)
@@ -447,6 +451,10 @@ static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
  *   any link TRBs with the toggle cycle bit set.
  * - Finally we move the dequeue state one TRB further, toggling the cycle bit
  *   if we've moved it past a link TRB with the toggle cycle bit set.
+ *
+ * Some of the uses of xhci_generic_trb are grotty, but if they're done
+ * with correct __le32 accesses they should work fine.  Only users of this are
+ * in here.
  */
 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
@@ -480,7 +488,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
 	xhci_dbg(xhci, "Finding endpoint context\n");
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
-	state->new_cycle_state = 0x1 & ep_ctx->deq;
+	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
 
 	state->new_deq_ptr = cur_td->last_trb;
 	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
@@ -493,8 +501,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 	}
 
 	trb = &state->new_deq_ptr->generic;
-	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
-				(trb->field[3] & LINK_TOGGLE))
+	if ((le32_to_cpu(trb->field[3]) & TRB_TYPE_BITMASK) ==
+	    TRB_TYPE(TRB_LINK) && (le32_to_cpu(trb->field[3]) & LINK_TOGGLE))
 		state->new_cycle_state ^= 0x1;
 	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
@@ -529,12 +537,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
 			true;
 			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
-				TRB_TYPE(TRB_LINK)) {
+		if ((le32_to_cpu(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK)
+		    == TRB_TYPE(TRB_LINK)) {
 			/* Unchain any chained Link TRBs, but
 			 * leave the pointers intact.
 			 */
-			cur_trb->generic.field[3] &= ~TRB_CHAIN;
+			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
 			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
 			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
 					"in seg %p (0x%llx dma)\n",
@@ -547,8 +555,9 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			cur_trb->generic.field[1] = 0;
 			cur_trb->generic.field[2] = 0;
 			/* Preserve only the cycle bit of this TRB */
-			cur_trb->generic.field[3] &= TRB_CYCLE;
-			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
+			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+			cur_trb->generic.field[3] |= cpu_to_le32(
+				TRB_TYPE(TRB_TR_NOOP));
 			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
 					"in seg %p (0x%llx dma)\n",
 					cur_trb,
@@ -662,9 +671,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	struct xhci_dequeue_state deq_state;
 
 	if (unlikely(TRB_TO_SUSPEND_PORT(
-			xhci->cmd_ring->dequeue->generic.field[3]))) {
+		     le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
 		slot_id = TRB_TO_SLOT_ID(
-			xhci->cmd_ring->dequeue->generic.field[3]);
+			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
 		virt_dev = xhci->devs[slot_id];
 		if (virt_dev)
 			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
@@ -677,8 +686,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	}
 
 	memset(&deq_state, 0, sizeof(deq_state));
-	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
-	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
+	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 	ep = &xhci->devs[slot_id]->eps[ep_index];
 
 	if (list_empty(&ep->cancelled_td_list)) {
@@ -910,9 +919,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	struct xhci_ep_ctx *ep_ctx;
 	struct xhci_slot_ctx *slot_ctx;
 
-	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
-	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
-	stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
+	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
 	dev = xhci->devs[slot_id];
 
 	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
@@ -928,11 +937,11 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
-	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
+	if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
 		unsigned int ep_state;
 		unsigned int slot_state;
 
-		switch (GET_COMP_CODE(event->status)) {
+		switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
 		case COMP_TRB_ERR:
 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
 				"of stream ID configuration\n");
@@ -940,9 +949,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
case COMP_CTX_STATE: case COMP_CTX_STATE:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
"to incorrect slot or ep state.\n"); "to incorrect slot or ep state.\n");
ep_state = ep_ctx->ep_info; ep_state = le32_to_cpu(ep_ctx->ep_info);
ep_state &= EP_STATE_MASK; ep_state &= EP_STATE_MASK;
slot_state = slot_ctx->dev_state; slot_state = le32_to_cpu(slot_ctx->dev_state);
slot_state = GET_SLOT_STATE(slot_state); slot_state = GET_SLOT_STATE(slot_state);
xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
slot_state, ep_state); slot_state, ep_state);
...@@ -954,7 +963,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, ...@@ -954,7 +963,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
default: default:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown " xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
"completion code of %u.\n", "completion code of %u.\n",
GET_COMP_CODE(event->status)); GET_COMP_CODE(le32_to_cpu(event->status)));
break; break;
} }
/* OK what do we do now? The endpoint state is hosed, and we /* OK what do we do now? The endpoint state is hosed, and we
...@@ -965,10 +974,10 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, ...@@ -965,10 +974,10 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
*/ */
} else { } else {
xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
ep_ctx->deq); le64_to_cpu(ep_ctx->deq));
if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
dev->eps[ep_index].queued_deq_ptr) == dev->eps[ep_index].queued_deq_ptr) ==
(ep_ctx->deq & ~(EP_CTX_CYCLE_MASK))) { (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
/* Update the ring's dequeue segment and dequeue pointer /* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position. * to reflect the new position.
*/ */
...@@ -997,13 +1006,13 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci, ...@@ -997,13 +1006,13 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
int slot_id; int slot_id;
unsigned int ep_index; unsigned int ep_index;
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
/* This command will only fail if the endpoint wasn't halted, /* This command will only fail if the endpoint wasn't halted,
* but we don't care. * but we don't care.
*/ */
xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
(unsigned int) GET_COMP_CODE(event->status)); (unsigned int) GET_COMP_CODE(le32_to_cpu(event->status)));
/* HW with the reset endpoint quirk needs to have a configure endpoint /* HW with the reset endpoint quirk needs to have a configure endpoint
* command complete before the endpoint can be used. Queue that here * command complete before the endpoint can be used. Queue that here
...@@ -1040,8 +1049,7 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, ...@@ -1040,8 +1049,7 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
if (xhci->cmd_ring->dequeue != command->command_trb) if (xhci->cmd_ring->dequeue != command->command_trb)
return 0; return 0;
command->status = command->status = GET_COMP_CODE(le32_to_cpu(event->status));
GET_COMP_CODE(event->status);
list_del(&command->cmd_list); list_del(&command->cmd_list);
if (command->completion) if (command->completion)
complete(command->completion); complete(command->completion);
...@@ -1053,7 +1061,7 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, ...@@ -1053,7 +1061,7 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
static void handle_cmd_completion(struct xhci_hcd *xhci, static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event) struct xhci_event_cmd *event)
{ {
int slot_id = TRB_TO_SLOT_ID(event->flags); int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
u64 cmd_dma; u64 cmd_dma;
dma_addr_t cmd_dequeue_dma; dma_addr_t cmd_dequeue_dma;
struct xhci_input_control_ctx *ctrl_ctx; struct xhci_input_control_ctx *ctrl_ctx;
...@@ -1062,7 +1070,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, ...@@ -1062,7 +1070,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring; struct xhci_ring *ep_ring;
unsigned int ep_state; unsigned int ep_state;
cmd_dma = event->cmd_trb; cmd_dma = le64_to_cpu(event->cmd_trb);
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
xhci->cmd_ring->dequeue); xhci->cmd_ring->dequeue);
/* Is the command ring deq ptr out of sync with the deq seg ptr? */ /* Is the command ring deq ptr out of sync with the deq seg ptr? */
...@@ -1075,9 +1083,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, ...@@ -1075,9 +1083,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci->error_bitmask |= 1 << 5; xhci->error_bitmask |= 1 << 5;
return; return;
} }
switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) { switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
& TRB_TYPE_BITMASK) {
case TRB_TYPE(TRB_ENABLE_SLOT): case TRB_TYPE(TRB_ENABLE_SLOT):
if (GET_COMP_CODE(event->status) == COMP_SUCCESS) if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
xhci->slot_id = slot_id; xhci->slot_id = slot_id;
else else
xhci->slot_id = 0; xhci->slot_id = 0;
...@@ -1102,7 +1111,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, ...@@ -1102,7 +1111,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
ctrl_ctx = xhci_get_input_control_ctx(xhci, ctrl_ctx = xhci_get_input_control_ctx(xhci,
virt_dev->in_ctx); virt_dev->in_ctx);
/* Input ctx add_flags are the endpoint index plus one */ /* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1; ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
/* A usb_set_interface() call directly after clearing a halted /* A usb_set_interface() call directly after clearing a halted
* condition may race on this quirky hardware. Not worth * condition may race on this quirky hardware. Not worth
* worrying about, since this is prototype hardware. Not sure * worrying about, since this is prototype hardware. Not sure
...@@ -1111,8 +1120,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, ...@@ -1111,8 +1120,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
*/ */
if (xhci->quirks & XHCI_RESET_EP_QUIRK && if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
ep_index != (unsigned int) -1 && ep_index != (unsigned int) -1 &&
ctrl_ctx->add_flags - SLOT_FLAG == le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
ctrl_ctx->drop_flags) { le32_to_cpu(ctrl_ctx->drop_flags)) {
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
if (!(ep_state & EP_HALTED)) if (!(ep_state & EP_HALTED))
...@@ -1129,18 +1138,18 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, ...@@ -1129,18 +1138,18 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
bandwidth_change: bandwidth_change:
xhci_dbg(xhci, "Completed config ep cmd\n"); xhci_dbg(xhci, "Completed config ep cmd\n");
xhci->devs[slot_id]->cmd_status = xhci->devs[slot_id]->cmd_status =
GET_COMP_CODE(event->status); GET_COMP_CODE(le32_to_cpu(event->status));
complete(&xhci->devs[slot_id]->cmd_completion); complete(&xhci->devs[slot_id]->cmd_completion);
break; break;
case TRB_TYPE(TRB_EVAL_CONTEXT): case TRB_TYPE(TRB_EVAL_CONTEXT):
virt_dev = xhci->devs[slot_id]; virt_dev = xhci->devs[slot_id];
if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event)) if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
break; break;
xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
complete(&xhci->devs[slot_id]->cmd_completion); complete(&xhci->devs[slot_id]->cmd_completion);
break; break;
case TRB_TYPE(TRB_ADDR_DEV): case TRB_TYPE(TRB_ADDR_DEV):
xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
complete(&xhci->addr_dev); complete(&xhci->addr_dev);
break; break;
case TRB_TYPE(TRB_STOP_RING): case TRB_TYPE(TRB_STOP_RING):
...@@ -1157,7 +1166,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, ...@@ -1157,7 +1166,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
case TRB_TYPE(TRB_RESET_DEV): case TRB_TYPE(TRB_RESET_DEV):
xhci_dbg(xhci, "Completed reset device command.\n"); xhci_dbg(xhci, "Completed reset device command.\n");
slot_id = TRB_TO_SLOT_ID( slot_id = TRB_TO_SLOT_ID(
xhci->cmd_ring->dequeue->generic.field[3]); le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
virt_dev = xhci->devs[slot_id]; virt_dev = xhci->devs[slot_id];
if (virt_dev) if (virt_dev)
handle_cmd_in_cmd_wait_list(xhci, virt_dev, event); handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
...@@ -1171,8 +1180,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, ...@@ -1171,8 +1180,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
break; break;
} }
xhci_dbg(xhci, "NEC firmware version %2x.%02x\n", xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
NEC_FW_MAJOR(event->status), NEC_FW_MAJOR(le32_to_cpu(event->status)),
NEC_FW_MINOR(event->status)); NEC_FW_MINOR(le32_to_cpu(event->status)));
break; break;
default: default:
/* Skip over unknown commands on the event ring */ /* Skip over unknown commands on the event ring */
...@@ -1187,7 +1196,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci, ...@@ -1187,7 +1196,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
{ {
u32 trb_type; u32 trb_type;
trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]); trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
handle_cmd_completion(xhci, &event->event_cmd); handle_cmd_completion(xhci, &event->event_cmd);
...@@ -1241,15 +1250,15 @@ static void handle_port_status(struct xhci_hcd *xhci, ...@@ -1241,15 +1250,15 @@ static void handle_port_status(struct xhci_hcd *xhci,
unsigned int faked_port_index; unsigned int faked_port_index;
u8 major_revision; u8 major_revision;
struct xhci_bus_state *bus_state; struct xhci_bus_state *bus_state;
u32 __iomem **port_array; __le32 __iomem **port_array;
bool bogus_port_status = false; bool bogus_port_status = false;
/* Port status change events always have a successful completion code */ /* Port status change events always have a successful completion code */
if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) { if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
xhci->error_bitmask |= 1 << 8; xhci->error_bitmask |= 1 << 8;
} }
port_id = GET_PORT_ID(event->generic.field[0]); port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
max_ports = HCS_MAX_PORTS(xhci->hcs_params1); max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
...@@ -1456,7 +1465,7 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, ...@@ -1456,7 +1465,7 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
* endpoint anyway. Check if a babble halted the * endpoint anyway. Check if a babble halted the
* endpoint. * endpoint.
*/ */
if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED) if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED)
return 1; return 1;
return 0; return 0;
...@@ -1494,12 +1503,12 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, ...@@ -1494,12 +1503,12 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct urb_priv *urb_priv; struct urb_priv *urb_priv;
u32 trb_comp_code; u32 trb_comp_code;
slot_id = TRB_TO_SLOT_ID(event->flags); slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id]; xdev = xhci->devs[slot_id];
ep_index = TRB_TO_EP_ID(event->flags) - 1; ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
trb_comp_code = GET_COMP_CODE(event->transfer_len); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
if (skip) if (skip)
goto td_cleanup; goto td_cleanup;
...@@ -1602,12 +1611,12 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, ...@@ -1602,12 +1611,12 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_ep_ctx *ep_ctx; struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code; u32 trb_comp_code;
slot_id = TRB_TO_SLOT_ID(event->flags); slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id]; xdev = xhci->devs[slot_id];
ep_index = TRB_TO_EP_ID(event->flags) - 1; ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
trb_comp_code = GET_COMP_CODE(event->transfer_len); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
xhci_debug_trb(xhci, xhci->event_ring->dequeue); xhci_debug_trb(xhci, xhci->event_ring->dequeue);
switch (trb_comp_code) { switch (trb_comp_code) {
...@@ -1646,7 +1655,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, ...@@ -1646,7 +1655,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
event_trb != td->last_trb) event_trb != td->last_trb)
td->urb->actual_length = td->urb->actual_length =
td->urb->transfer_buffer_length td->urb->transfer_buffer_length
- TRB_LEN(event->transfer_len); - TRB_LEN(le32_to_cpu(event->transfer_len));
else else
td->urb->actual_length = 0; td->urb->actual_length = 0;
...@@ -1680,7 +1689,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, ...@@ -1680,7 +1689,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* We didn't stop on a link TRB in the middle */ /* We didn't stop on a link TRB in the middle */
td->urb->actual_length = td->urb->actual_length =
td->urb->transfer_buffer_length - td->urb->transfer_buffer_length -
TRB_LEN(event->transfer_len); TRB_LEN(le32_to_cpu(event->transfer_len));
xhci_dbg(xhci, "Waiting for status " xhci_dbg(xhci, "Waiting for status "
"stage event\n"); "stage event\n");
return 0; return 0;
...@@ -1708,8 +1717,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, ...@@ -1708,8 +1717,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
u32 trb_comp_code; u32 trb_comp_code;
bool skip_td = false; bool skip_td = false;
ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(event->transfer_len); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
urb_priv = td->urb->hcpriv; urb_priv = td->urb->hcpriv;
idx = urb_priv->td_cnt; idx = urb_priv->td_cnt;
frame = &td->urb->iso_frame_desc[idx]; frame = &td->urb->iso_frame_desc[idx];
...@@ -1752,15 +1761,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, ...@@ -1752,15 +1761,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
for (cur_trb = ep_ring->dequeue, for (cur_trb = ep_ring->dequeue,
cur_seg = ep_ring->deq_seg; cur_trb != event_trb; cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
if ((cur_trb->generic.field[3] & if ((le32_to_cpu(cur_trb->generic.field[3]) &
TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
(cur_trb->generic.field[3] & (le32_to_cpu(cur_trb->generic.field[3]) &
TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
len += len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
TRB_LEN(cur_trb->generic.field[2]);
} }
len += TRB_LEN(cur_trb->generic.field[2]) - len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
TRB_LEN(event->transfer_len); TRB_LEN(le32_to_cpu(event->transfer_len));
if (trb_comp_code != COMP_STOP_INVAL) { if (trb_comp_code != COMP_STOP_INVAL) {
frame->actual_length = len; frame->actual_length = len;
...@@ -1815,8 +1823,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, ...@@ -1815,8 +1823,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_segment *cur_seg; struct xhci_segment *cur_seg;
u32 trb_comp_code; u32 trb_comp_code;
ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(event->transfer_len); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
switch (trb_comp_code) { switch (trb_comp_code) {
case COMP_SUCCESS: case COMP_SUCCESS:
...@@ -1852,18 +1860,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, ...@@ -1852,18 +1860,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
"%d bytes untransferred\n", "%d bytes untransferred\n",
td->urb->ep->desc.bEndpointAddress, td->urb->ep->desc.bEndpointAddress,
td->urb->transfer_buffer_length, td->urb->transfer_buffer_length,
TRB_LEN(event->transfer_len)); TRB_LEN(le32_to_cpu(event->transfer_len)));
/* Fast path - was this the last TRB in the TD for this URB? */ /* Fast path - was this the last TRB in the TD for this URB? */
if (event_trb == td->last_trb) { if (event_trb == td->last_trb) {
if (TRB_LEN(event->transfer_len) != 0) { if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
td->urb->actual_length = td->urb->actual_length =
td->urb->transfer_buffer_length - td->urb->transfer_buffer_length -
TRB_LEN(event->transfer_len); TRB_LEN(le32_to_cpu(event->transfer_len));
if (td->urb->transfer_buffer_length < if (td->urb->transfer_buffer_length <
td->urb->actual_length) { td->urb->actual_length) {
xhci_warn(xhci, "HC gave bad length " xhci_warn(xhci, "HC gave bad length "
"of %d bytes left\n", "of %d bytes left\n",
TRB_LEN(event->transfer_len)); TRB_LEN(le32_to_cpu(event->transfer_len)));
td->urb->actual_length = 0; td->urb->actual_length = 0;
if (td->urb->transfer_flags & URB_SHORT_NOT_OK) if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO; *status = -EREMOTEIO;
...@@ -1894,20 +1902,20 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, ...@@ -1894,20 +1902,20 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
cur_trb != event_trb; cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
if ((cur_trb->generic.field[3] & if ((le32_to_cpu(cur_trb->generic.field[3]) &
TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
(cur_trb->generic.field[3] & (le32_to_cpu(cur_trb->generic.field[3]) &
TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
td->urb->actual_length += td->urb->actual_length +=
TRB_LEN(cur_trb->generic.field[2]); TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
} }
/* If the ring didn't stop on a Link or No-op TRB, add /* If the ring didn't stop on a Link or No-op TRB, add
* in the actual bytes transferred from the Normal TRB * in the actual bytes transferred from the Normal TRB
*/ */
if (trb_comp_code != COMP_STOP_INVAL) if (trb_comp_code != COMP_STOP_INVAL)
td->urb->actual_length += td->urb->actual_length +=
TRB_LEN(cur_trb->generic.field[2]) - TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
TRB_LEN(event->transfer_len); TRB_LEN(le32_to_cpu(event->transfer_len));
} }
return finish_td(xhci, td, event_trb, event, ep, status, false); return finish_td(xhci, td, event_trb, event, ep, status, false);
...@@ -1937,7 +1945,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, ...@@ -1937,7 +1945,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
u32 trb_comp_code; u32 trb_comp_code;
int ret = 0; int ret = 0;
slot_id = TRB_TO_SLOT_ID(event->flags); slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id]; xdev = xhci->devs[slot_id];
if (!xdev) { if (!xdev) {
xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
...@@ -1945,20 +1953,21 @@ static int handle_tx_event(struct xhci_hcd *xhci, ...@@ -1945,20 +1953,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
} }
/* Endpoint ID is 1 based, our index is zero based */ /* Endpoint ID is 1 based, our index is zero based */
ep_index = TRB_TO_EP_ID(event->flags) - 1; ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
ep = &xdev->eps[ep_index]; ep = &xdev->eps[ep_index];
ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
if (!ep_ring || if (!ep_ring ||
(ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
EP_STATE_DISABLED) {
xhci_err(xhci, "ERROR Transfer event for disabled endpoint " xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
"or incorrect stream ring\n"); "or incorrect stream ring\n");
return -ENODEV; return -ENODEV;
} }
event_dma = event->buffer; event_dma = le64_to_cpu(event->buffer);
trb_comp_code = GET_COMP_CODE(event->transfer_len); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
/* Look for common error cases */ /* Look for common error cases */
switch (trb_comp_code) { switch (trb_comp_code) {
/* Skip codes that require special handling depending on /* Skip codes that require special handling depending on
...@@ -2011,14 +2020,16 @@ static int handle_tx_event(struct xhci_hcd *xhci, ...@@ -2011,14 +2020,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (!list_empty(&ep_ring->td_list)) if (!list_empty(&ep_ring->td_list))
xhci_dbg(xhci, "Underrun Event for slot %d ep %d " xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
"still with TDs queued?\n", "still with TDs queued?\n",
TRB_TO_SLOT_ID(event->flags), ep_index); TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
goto cleanup; goto cleanup;
case COMP_OVERRUN: case COMP_OVERRUN:
xhci_dbg(xhci, "overrun event on endpoint\n"); xhci_dbg(xhci, "overrun event on endpoint\n");
if (!list_empty(&ep_ring->td_list)) if (!list_empty(&ep_ring->td_list))
xhci_dbg(xhci, "Overrun Event for slot %d ep %d " xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
"still with TDs queued?\n", "still with TDs queued?\n",
TRB_TO_SLOT_ID(event->flags), ep_index); TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
goto cleanup; goto cleanup;
case COMP_MISSED_INT: case COMP_MISSED_INT:
/* /*
...@@ -2047,9 +2058,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, ...@@ -2047,9 +2058,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (list_empty(&ep_ring->td_list)) { if (list_empty(&ep_ring->td_list)) {
xhci_warn(xhci, "WARN Event TRB for slot %d ep %d " xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
"with no TDs queued?\n", "with no TDs queued?\n",
TRB_TO_SLOT_ID(event->flags), ep_index); TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); (unsigned int) (le32_to_cpu(event->flags)
& TRB_TYPE_BITMASK)>>10);
xhci_print_trb_offsets(xhci, (union xhci_trb *) event); xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
if (ep->skip) { if (ep->skip) {
ep->skip = false; ep->skip = false;
...@@ -2092,7 +2105,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, ...@@ -2092,7 +2105,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* corresponding TD has been cancelled. Just ignore * corresponding TD has been cancelled. Just ignore
* the TD. * the TD.
*/ */
if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK) if ((le32_to_cpu(event_trb->generic.field[3])
& TRB_TYPE_BITMASK)
== TRB_TYPE(TRB_TR_NOOP)) { == TRB_TYPE(TRB_TR_NOOP)) {
xhci_dbg(xhci, xhci_dbg(xhci,
"event_trb is a no-op TRB. Skip it\n"); "event_trb is a no-op TRB. Skip it\n");
...@@ -2157,8 +2171,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, ...@@ -2157,8 +2171,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* /*
* This function handles all OS-owned events on the event ring. It may drop * This function handles all OS-owned events on the event ring. It may drop
* xhci->lock between event processing (e.g. to pass up port status changes). * xhci->lock between event processing (e.g. to pass up port status changes).
* Returns >0 for "possibly more events to process" (caller should call again),
* otherwise 0 when done. In the future, a return value < 0 will indicate an
* error code.
*/ */
static void xhci_handle_event(struct xhci_hcd *xhci) static int xhci_handle_event(struct xhci_hcd *xhci)
{ {
union xhci_trb *event; union xhci_trb *event;
int update_ptrs = 1; int update_ptrs = 1;
...@@ -2167,20 +2183,25 @@ static void xhci_handle_event(struct xhci_hcd *xhci) ...@@ -2167,20 +2183,25 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
xhci_dbg(xhci, "In %s\n", __func__); xhci_dbg(xhci, "In %s\n", __func__);
if (!xhci->event_ring || !xhci->event_ring->dequeue) { if (!xhci->event_ring || !xhci->event_ring->dequeue) {
xhci->error_bitmask |= 1 << 1; xhci->error_bitmask |= 1 << 1;
return; return 0;
} }
event = xhci->event_ring->dequeue; event = xhci->event_ring->dequeue;
/* Does the HC or OS own the TRB? */ /* Does the HC or OS own the TRB? */
if ((event->event_cmd.flags & TRB_CYCLE) != if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
xhci->event_ring->cycle_state) { xhci->event_ring->cycle_state) {
xhci->error_bitmask |= 1 << 2; xhci->error_bitmask |= 1 << 2;
return; return 0;
} }
xhci_dbg(xhci, "%s - OS owns TRB\n", __func__); xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
* speculative reads of the event's flags/data below.
*/
rmb();
/* FIXME: Handle more event types. */ /* FIXME: Handle more event types. */
switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
case TRB_TYPE(TRB_COMPLETION): case TRB_TYPE(TRB_COMPLETION):
xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__); xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
handle_cmd_completion(xhci, &event->event_cmd); handle_cmd_completion(xhci, &event->event_cmd);
...@@ -2202,7 +2223,8 @@ static void xhci_handle_event(struct xhci_hcd *xhci) ...@@ -2202,7 +2223,8 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
update_ptrs = 0; update_ptrs = 0;
break; break;
default: default:
if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48)) if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
TRB_TYPE(48))
handle_vendor_event(xhci, event); handle_vendor_event(xhci, event);
else else
xhci->error_bitmask |= 1 << 3; xhci->error_bitmask |= 1 << 3;
...@@ -2213,15 +2235,17 @@ static void xhci_handle_event(struct xhci_hcd *xhci) ...@@ -2213,15 +2235,17 @@ static void xhci_handle_event(struct xhci_hcd *xhci)
if (xhci->xhc_state & XHCI_STATE_DYING) { if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg(xhci, "xHCI host dying, returning from " xhci_dbg(xhci, "xHCI host dying, returning from "
"event handler.\n"); "event handler.\n");
return; return 0;
} }
if (update_ptrs) if (update_ptrs)
/* Update SW event ring dequeue pointer */ /* Update SW event ring dequeue pointer */
inc_deq(xhci, xhci->event_ring, true); inc_deq(xhci, xhci->event_ring, true);
/* Are there more items on the event ring? */ /* Are there more items on the event ring? Caller will call us again to
xhci_handle_event(xhci); * check.
*/
return 1;
} }
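/*
 * Callers now drive the loop themselves instead of relying on the old
 * recursive call; the typical pattern (used by xhci_irq() below) is
 *
 *	while (xhci_handle_event(xhci) > 0)
 *		;
 *
 * which keeps kernel stack usage bounded no matter how many events are
 * pending on the ring.
 */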
/* /*
...@@ -2252,12 +2276,12 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) ...@@ -2252,12 +2276,12 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
xhci_dbg(xhci, "op reg status = %08x\n", status); xhci_dbg(xhci, "op reg status = %08x\n", status);
xhci_dbg(xhci, "Event ring dequeue ptr:\n"); xhci_dbg(xhci, "Event ring dequeue ptr:\n");
xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n", xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
(unsigned long long) (unsigned long long)
xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb), xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
lower_32_bits(trb->link.segment_ptr), lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
upper_32_bits(trb->link.segment_ptr), upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
(unsigned int) trb->link.intr_target, (unsigned int) le32_to_cpu(trb->link.intr_target),
(unsigned int) trb->link.control); (unsigned int) le32_to_cpu(trb->link.control));
if (status & STS_FATAL) { if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n"); xhci_warn(xhci, "WARNING: Host System Error\n");
...@@ -2303,7 +2327,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) ...@@ -2303,7 +2327,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
/* FIXME this should be a delayed service routine /* FIXME this should be a delayed service routine
* that clears the EHB. * that clears the EHB.
*/ */
xhci_handle_event(xhci); while (xhci_handle_event(xhci) > 0) {}
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */ /* If necessary, update the HW's version of the event ring deq ptr. */
...@@ -2358,10 +2382,10 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, ...@@ -2358,10 +2382,10 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_generic_trb *trb; struct xhci_generic_trb *trb;
trb = &ring->enqueue->generic; trb = &ring->enqueue->generic;
trb->field[0] = field1; trb->field[0] = cpu_to_le32(field1);
trb->field[1] = field2; trb->field[1] = cpu_to_le32(field2);
trb->field[2] = field3; trb->field[2] = cpu_to_le32(field3);
trb->field[3] = field4; trb->field[3] = cpu_to_le32(field4);
inc_enq(xhci, ring, consumer, more_trbs_coming); inc_enq(xhci, ring, consumer, more_trbs_coming);
} }
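/*
 * Note that field1..field4 arrive in CPU byte order and are converted to
 * little-endian exactly once, here in queue_trb(), so callers can keep
 * building their TRB words with plain u32 arithmetic.
 */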
...@@ -2414,17 +2438,16 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, ...@@ -2414,17 +2438,16 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
next = ring->enqueue; next = ring->enqueue;
while (last_trb(xhci, ring, ring->enq_seg, next)) { while (last_trb(xhci, ring, ring->enq_seg, next)) {
/* If we're not dealing with 0.95 hardware, /* If we're not dealing with 0.95 hardware,
* clear the chain bit. * clear the chain bit.
*/ */
if (!xhci_link_trb_quirk(xhci)) if (!xhci_link_trb_quirk(xhci))
next->link.control &= ~TRB_CHAIN; next->link.control &= cpu_to_le32(~TRB_CHAIN);
else else
next->link.control |= TRB_CHAIN; next->link.control |= cpu_to_le32(TRB_CHAIN);
wmb(); wmb();
next->link.control ^= (u32) TRB_CYCLE; next->link.control ^= cpu_to_le32((u32) TRB_CYCLE);
/* Toggle the cycle bit after the last ring segment. */ /* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
...@@ -2467,8 +2490,8 @@ static int prepare_transfer(struct xhci_hcd *xhci, ...@@ -2467,8 +2490,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
} }
ret = prepare_ring(xhci, ep_ring, ret = prepare_ring(xhci, ep_ring,
ep_ctx->ep_info & EP_STATE_MASK, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
num_trbs, mem_flags); num_trbs, mem_flags);
if (ret) if (ret)
return ret; return ret;
...@@ -2570,9 +2593,9 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, ...@@ -2570,9 +2593,9 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
*/ */
wmb(); wmb();
if (start_cycle) if (start_cycle)
start_trb->field[3] |= start_cycle; start_trb->field[3] |= cpu_to_le32(start_cycle);
else else
start_trb->field[3] &= ~0x1; start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
} }
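/*
 * Ordering note: the wmb() in giveback_first_trb() makes sure every other
 * TRB word is visible to the controller before the cycle bit is written,
 * since flipping that bit is what hands ownership of the TD to the xHC.
 */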
...@@ -2590,7 +2613,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -2590,7 +2613,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
int xhci_interval; int xhci_interval;
int ep_interval; int ep_interval;
xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info); xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
ep_interval = urb->interval; ep_interval = urb->interval;
/* Convert to microframes */ /* Convert to microframes */
if (urb->dev->speed == USB_SPEED_LOW || if (urb->dev->speed == USB_SPEED_LOW ||
...@@ -2632,6 +2655,35 @@ static u32 xhci_td_remainder(unsigned int remainder) ...@@ -2632,6 +2655,35 @@ static u32 xhci_td_remainder(unsigned int remainder)
return (remainder >> 10) << 17; return (remainder >> 10) << 17;
} }
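/*
 * Example of the pre-1.0 encoding above (hypothetical numbers): with
 * 20480 bytes left in the TD, (20480 >> 10) << 17 stores 20, the
 * remainder in kilobyte units, into bits 21:17 of the length field.
 */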
/*
* For xHCI 1.0 host controllers, TD size is the number of packets remaining in
* the TD (*not* including this TRB).
*
* Total TD packet count = total_packet_count =
* DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
*
* Packets transferred up to and including this TRB = packets_transferred =
* rounddown(total bytes transferred including this TRB / wMaxPacketSize)
*
* TD size = total_packet_count - packets_transferred
*
* It must fit in bits 21:17, so it can't be bigger than 31.
*/
static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
unsigned int total_packet_count, struct urb *urb)
{
int packets_transferred;
/* None of the TRB queueing functions count the current TRB in
* running_total.
*/
packets_transferred = (running_total + trb_buff_len) /
le16_to_cpu(urb->ep->desc.wMaxPacketSize);
return xhci_td_remainder(total_packet_count - packets_transferred);
}
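/*
 * Worked example (hypothetical numbers): a 3072-byte TD on an endpoint
 * with wMaxPacketSize = 512 has total_packet_count = DIV_ROUND_UP(3072,
 * 512) = 6. For the TRB after which 1024 bytes have been transferred,
 * packets_transferred = 1024 / 512 = 2, so the TD size field reports
 * 6 - 2 = 4 packets remaining.
 */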
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index) struct urb *urb, int slot_id, unsigned int ep_index)
{ {
...@@ -2642,6 +2694,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -2642,6 +2694,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct scatterlist *sg; struct scatterlist *sg;
int num_sgs; int num_sgs;
int trb_buff_len, this_sg_len, running_total; int trb_buff_len, this_sg_len, running_total;
unsigned int total_packet_count;
bool first_trb; bool first_trb;
u64 addr; u64 addr;
bool more_trbs_coming; bool more_trbs_coming;
...@@ -2655,6 +2708,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -2655,6 +2708,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs = count_sg_trbs_needed(xhci, urb); num_trbs = count_sg_trbs_needed(xhci, urb);
num_sgs = urb->num_sgs; num_sgs = urb->num_sgs;
total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
le16_to_cpu(urb->ep->desc.wMaxPacketSize));
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id, ep_index, urb->stream_id,
...@@ -2718,6 +2773,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -2718,6 +2773,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td->last_trb = ep_ring->enqueue; td->last_trb = ep_ring->enqueue;
field |= TRB_IOC; field |= TRB_IOC;
} }
/* Only set interrupt on short packet for IN endpoints */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), " xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
"64KB boundary at %#x, end dma = %#x\n", "64KB boundary at %#x, end dma = %#x\n",
(unsigned int) addr, trb_buff_len, trb_buff_len, (unsigned int) addr, trb_buff_len, trb_buff_len,
...@@ -2730,11 +2790,20 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -2730,11 +2790,20 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len); (unsigned int) addr + trb_buff_len);
} }
remainder = xhci_td_remainder(urb->transfer_buffer_length -
running_total) ; /* Set the TRB length, TD size, and interrupter fields. */
if (xhci->hci_version < 0x100) {
remainder = xhci_td_remainder(
urb->transfer_buffer_length -
running_total);
} else {
remainder = xhci_v1_0_td_remainder(running_total,
trb_buff_len, total_packet_count, urb);
}
length_field = TRB_LEN(trb_buff_len) | length_field = TRB_LEN(trb_buff_len) |
remainder | remainder |
TRB_INTR_TARGET(0); TRB_INTR_TARGET(0);
if (num_trbs > 1) if (num_trbs > 1)
more_trbs_coming = true; more_trbs_coming = true;
else else
...@@ -2743,12 +2812,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -2743,12 +2812,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
lower_32_bits(addr), lower_32_bits(addr),
upper_32_bits(addr), upper_32_bits(addr),
length_field, length_field,
/* We always want to know if the TRB was short, field | TRB_TYPE(TRB_NORMAL));
* or we won't get an event when it completes.
* (Unless we use event data TRBs, which are a
* waste of space and HC resources.)
*/
field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
--num_trbs; --num_trbs;
running_total += trb_buff_len; running_total += trb_buff_len;
...@@ -2796,6 +2860,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -2796,6 +2860,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
u32 field, length_field; u32 field, length_field;
int running_total, trb_buff_len, ret; int running_total, trb_buff_len, ret;
unsigned int total_packet_count;
u64 addr; u64 addr;
if (urb->num_sgs) if (urb->num_sgs)
...@@ -2850,6 +2915,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -2850,6 +2915,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
start_cycle = ep_ring->cycle_state; start_cycle = ep_ring->cycle_state;
running_total = 0; running_total = 0;
total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
le16_to_cpu(urb->ep->desc.wMaxPacketSize));
/* How much data is in the first TRB? */ /* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma; addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE - trb_buff_len = TRB_MAX_BUFF_SIZE -
...@@ -2882,11 +2949,24 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -2882,11 +2949,24 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td->last_trb = ep_ring->enqueue; td->last_trb = ep_ring->enqueue;
field |= TRB_IOC; field |= TRB_IOC;
} }
remainder = xhci_td_remainder(urb->transfer_buffer_length -
running_total); /* Only set interrupt on short packet for IN endpoints */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
/* Set the TRB length, TD size, and interrupter fields. */
if (xhci->hci_version < 0x100) {
remainder = xhci_td_remainder(
urb->transfer_buffer_length -
running_total);
} else {
remainder = xhci_v1_0_td_remainder(running_total,
trb_buff_len, total_packet_count, urb);
}
length_field = TRB_LEN(trb_buff_len) | length_field = TRB_LEN(trb_buff_len) |
remainder | remainder |
TRB_INTR_TARGET(0); TRB_INTR_TARGET(0);
if (num_trbs > 1) if (num_trbs > 1)
more_trbs_coming = true; more_trbs_coming = true;
else else
...@@ -2895,12 +2975,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -2895,12 +2975,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
lower_32_bits(addr), lower_32_bits(addr),
upper_32_bits(addr), upper_32_bits(addr),
length_field, length_field,
/* We always want to know if the TRB was short, field | TRB_TYPE(TRB_NORMAL));
* or we won't get an event when it completes.
* (Unless we use event data TRBs, which are a
* waste of space and HC resources.)
*/
field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
--num_trbs; --num_trbs;
running_total += trb_buff_len; running_total += trb_buff_len;
...@@ -2979,15 +3054,19 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -2979,15 +3054,19 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (start_cycle == 0) if (start_cycle == 0)
field |= 0x1; field |= 0x1;
queue_trb(xhci, ep_ring, false, true, queue_trb(xhci, ep_ring, false, true,
/* FIXME endianness is probably going to bite my ass here. */ setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16, le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
setup->wIndex | setup->wLength << 16, TRB_LEN(8) | TRB_INTR_TARGET(0),
TRB_LEN(8) | TRB_INTR_TARGET(0), /* Immediate data in pointer */
/* Immediate data in pointer */ field);
field);
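/*
 * struct usb_ctrlrequest stores wValue, wIndex and wLength as __le16, so
 * the le16_to_cpu() conversions above are what keep the setup TRB layout
 * correct on big-endian hosts as well.
 */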
/* If there's data, queue data TRBs */ /* If there's data, queue data TRBs */
field = 0; /* Only set interrupt on short packet for IN endpoints */
if (usb_urb_dir_in(urb))
field = TRB_ISP | TRB_TYPE(TRB_DATA);
else
field = TRB_TYPE(TRB_DATA);
length_field = TRB_LEN(urb->transfer_buffer_length) | length_field = TRB_LEN(urb->transfer_buffer_length) |
xhci_td_remainder(urb->transfer_buffer_length) | xhci_td_remainder(urb->transfer_buffer_length) |
TRB_INTR_TARGET(0); TRB_INTR_TARGET(0);
...@@ -2998,8 +3077,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -2998,8 +3077,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
lower_32_bits(urb->transfer_dma), lower_32_bits(urb->transfer_dma),
upper_32_bits(urb->transfer_dma), upper_32_bits(urb->transfer_dma),
length_field, length_field,
/* Event on short tx */ field | ep_ring->cycle_state);
field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
} }
/* Save the DMA address of the last TRB in the TD */ /* Save the DMA address of the last TRB in the TD */
...@@ -3045,6 +3123,63 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci, ...@@ -3045,6 +3123,63 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
return num_trbs; return num_trbs;
} }
/*
* The transfer burst count field of the isochronous TRB defines the number of
* bursts that are required to move all packets in this TD. Only SuperSpeed
* devices can burst more than one packet per service opportunity, and a
* single burst moves up to (bMaxBurst + 1) packets.
* This field is zero based, meaning a value of zero in the field means one
* burst. Basically, for everything but SuperSpeed devices, this field will be
* zero. Only xHCI 1.0 host controllers support this field.
*/
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
struct usb_device *udev,
struct urb *urb, unsigned int total_packet_count)
{
unsigned int max_burst;
if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
return 0;
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
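/*
 * Example (hypothetical numbers): a SuperSpeed isoc TD of 7 packets on an
 * endpoint with bMaxBurst = 2 (3 packets per burst) needs
 * DIV_ROUND_UP(7, 3) = 3 bursts, so the zero-based TBC field is 2.
 */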
/*
* Returns the number of packets in the last "burst" of packets. This field is
* valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
* the last burst packet count is equal to the total number of packets in the
* TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
* must contain (bMaxBurst + 1) packets, but the last burst can
* contain 1 to (bMaxBurst + 1) packets.
*/
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
struct usb_device *udev,
struct urb *urb, unsigned int total_packet_count)
{
unsigned int max_burst;
unsigned int residue;
if (xhci->hci_version < 0x100)
return 0;
switch (udev->speed) {
case USB_SPEED_SUPER:
/* bMaxBurst is zero based: 0 means 1 packet per burst */
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
residue = total_packet_count % (max_burst + 1);
/* If residue is zero, the last burst contains (max_burst + 1)
* number of packets, but the TLBPC field is zero-based.
*/
if (residue == 0)
return max_burst;
return residue - 1;
default:
if (total_packet_count == 0)
return 0;
return total_packet_count - 1;
}
}
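/*
 * Continuing the example above: 7 packets with bMaxBurst = 2 leave
 * 7 % 3 = 1 packet in the final burst, so TLBPC = residue - 1 = 0. Had
 * the residue been zero, the last burst would be full and TLBPC would
 * equal max_burst (2).
 */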
/* This is for isoc transfer */ /* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index) struct urb *urb, int slot_id, unsigned int ep_index)
...@@ -3085,12 +3220,22 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3085,12 +3220,22 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Queue the first TRB, even if it's zero-length */ /* Queue the first TRB, even if it's zero-length */
for (i = 0; i < num_tds; i++) { for (i = 0; i < num_tds; i++) {
first_trb = true; unsigned int total_packet_count;
unsigned int burst_count;
unsigned int residue;
first_trb = true;
running_total = 0; running_total = 0;
addr = start_addr + urb->iso_frame_desc[i].offset; addr = start_addr + urb->iso_frame_desc[i].offset;
td_len = urb->iso_frame_desc[i].length; td_len = urb->iso_frame_desc[i].length;
td_remain_len = td_len; td_remain_len = td_len;
/* FIXME: Ignoring zero-length packets, can those happen? */
total_packet_count = DIV_ROUND_UP(td_len,
le16_to_cpu(urb->ep->desc.wMaxPacketSize));
burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
total_packet_count);
residue = xhci_get_last_burst_packet_count(xhci,
urb->dev, urb, total_packet_count);
trbs_per_td = count_isoc_trbs_needed(xhci, urb, i); trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
...@@ -3104,7 +3249,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3104,7 +3249,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
for (j = 0; j < trbs_per_td; j++) { for (j = 0; j < trbs_per_td; j++) {
u32 remainder = 0; u32 remainder = 0;
field = 0; field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
if (first_trb) { if (first_trb) {
/* Queue the isoc TRB */ /* Queue the isoc TRB */
...@@ -3123,6 +3268,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3123,6 +3268,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field |= ep_ring->cycle_state; field |= ep_ring->cycle_state;
} }
/* Only set interrupt on short packet for IN EPs */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
/* Chain all the TRBs together; clear the chain bit in /* Chain all the TRBs together; clear the chain bit in
* the last TRB to indicate it's the last TRB in the * the last TRB to indicate it's the last TRB in the
* chain. * chain.
...@@ -3142,20 +3291,24 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3142,20 +3291,24 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (trb_buff_len > td_remain_len) if (trb_buff_len > td_remain_len)
trb_buff_len = td_remain_len; trb_buff_len = td_remain_len;
remainder = xhci_td_remainder(td_len - running_total); /* Set the TRB length, TD size, & interrupter fields. */
if (xhci->hci_version < 0x100) {
remainder = xhci_td_remainder(
td_len - running_total);
} else {
remainder = xhci_v1_0_td_remainder(
running_total, trb_buff_len,
total_packet_count, urb);
}
length_field = TRB_LEN(trb_buff_len) | length_field = TRB_LEN(trb_buff_len) |
remainder | remainder |
TRB_INTR_TARGET(0); TRB_INTR_TARGET(0);
queue_trb(xhci, ep_ring, false, more_trbs_coming, queue_trb(xhci, ep_ring, false, more_trbs_coming,
lower_32_bits(addr), lower_32_bits(addr),
upper_32_bits(addr), upper_32_bits(addr),
length_field, length_field,
/* We always want to know if the TRB was short, field);
* or we won't get an event when it completes.
* (Unless we use event data TRBs, which are a
* waste of space and HC resources.)
*/
field | TRB_ISP);
running_total += trb_buff_len; running_total += trb_buff_len;
addr += trb_buff_len; addr += trb_buff_len;
...@@ -3211,8 +3364,8 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3211,8 +3364,8 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Check the ring to guarantee there is enough room for the whole urb. /* Check the ring to guarantee there is enough room for the whole urb.
* Do not insert any td of the urb to the ring if the check failed. * Do not insert any td of the urb to the ring if the check failed.
*/ */
ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK, ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
num_trbs, mem_flags); num_trbs, mem_flags);
if (ret) if (ret)
return ret; return ret;
...@@ -3224,7 +3377,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, ...@@ -3224,7 +3377,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
urb->dev->speed == USB_SPEED_FULL) urb->dev->speed == USB_SPEED_FULL)
urb->start_frame >>= 3; urb->start_frame >>= 3;
xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info); xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
ep_interval = urb->interval; ep_interval = urb->interval;
/* Convert to microframes */ /* Convert to microframes */
if (urb->dev->speed == USB_SPEED_LOW || if (urb->dev->speed == USB_SPEED_LOW ||
......
@@ -973,8 +973,8 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
-	hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
-	max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
+	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
+	max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
@@ -988,15 +988,15 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
-		ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
-		ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);
+		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
-		ctrl_ctx->add_flags = EP0_FLAG;
+		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;
		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
@@ -1010,7 +1010,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
-		ctrl_ctx->add_flags = SLOT_FLAG;
+		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}
@@ -1331,27 +1331,30 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
-	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
-			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
+	    EP_STATE_DISABLED ||
+	    le32_to_cpu(ctrl_ctx->drop_flags) &
+	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}
-	ctrl_ctx->drop_flags |= drop_flag;
-	new_drop_flags = ctrl_ctx->drop_flags;
-	ctrl_ctx->add_flags &= ~drop_flag;
-	new_add_flags = ctrl_ctx->add_flags;
-	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
+	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
+	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
+	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
-	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
-		slot_ctx->dev_info &= ~LAST_CTX_MASK;
-		slot_ctx->dev_info |= LAST_CTX(last_ctx);
+	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
+	    LAST_CTX(last_ctx)) {
+		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
-	new_slot_info = slot_ctx->dev_info;
+	new_slot_info = le32_to_cpu(slot_ctx->dev_info);
	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
@@ -1419,7 +1422,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
-	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+	if (le32_to_cpu(ctrl_ctx->add_flags) &
+	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
@@ -1437,8 +1441,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		return -ENOMEM;
	}
-	ctrl_ctx->add_flags |= added_ctxs;
-	new_add_flags = ctrl_ctx->add_flags;
+	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
+	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -1446,15 +1450,16 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
	 * descriptors. We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
-	new_drop_flags = ctrl_ctx->drop_flags;
+	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
-	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
-		slot_ctx->dev_info &= ~LAST_CTX_MASK;
-		slot_ctx->dev_info |= LAST_CTX(last_ctx);
+	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
+	    LAST_CTX(last_ctx)) {
+		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
-	new_slot_info = slot_ctx->dev_info;
+	new_slot_info = le32_to_cpu(slot_ctx->dev_info);
	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;
@@ -1484,9 +1489,9 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
-	slot_ctx->dev_info &= ~LAST_CTX_MASK;
+	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
-	slot_ctx->dev_info |= LAST_CTX(1);
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
@@ -1497,7 +1502,7 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
-		struct usb_device *udev, int *cmd_status)
+		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
@@ -1535,7 +1540,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
-		struct usb_device *udev, int *cmd_status)
+		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
@@ -1581,7 +1586,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct completion *cmd_completion;
-	int *cmd_status;
+	u32 *cmd_status;
	struct xhci_virt_device *virt_dev;
	spin_lock_irqsave(&xhci->lock, flags);
@@ -1595,8 +1600,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
	/* Enqueue pointer can be left pointing to the link TRB,
	 * we must handle that
	 */
-	if ((command->command_trb->link.control & TRB_TYPE_BITMASK)
-			== TRB_TYPE(TRB_LINK))
+	if ((le32_to_cpu(command->command_trb->link.control)
+	     & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
		command->command_trb =
			xhci->cmd_ring->enq_seg->next->trbs;
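The link-TRB check above (and its twin in xhci_discover_or_reset_device below) guards against the enqueue pointer resting on the link TRB that chains one ring segment to the next; treating that slot as a real command TRB would corrupt the ring. In CPU order the test reduces to a sketch like this (the helper name is illustrative, not part of this patch):

	/* Does this slot hold a link TRB rather than a queueable TRB? */
	static bool trb_is_link(const union xhci_trb *trb)
	{
		return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
				== TRB_TYPE(TRB_LINK);
	}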
@@ -1672,14 +1677,13 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
-	ctrl_ctx->add_flags |= SLOT_FLAG;
-	ctrl_ctx->add_flags &= ~EP0_FLAG;
-	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
-	ctrl_ctx->drop_flags &= ~EP0_FLAG;
+	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
+	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
-			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
+			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
@@ -1690,7 +1694,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
-			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
+			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
	xhci_zero_in_ctx(xhci, virt_dev);
	/* Install new rings and free or cache any old rings */
@@ -1740,10 +1744,10 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
{
	struct xhci_input_control_ctx *ctrl_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
-	ctrl_ctx->add_flags = add_flags;
-	ctrl_ctx->drop_flags = drop_flags;
+	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
+	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
-	ctrl_ctx->add_flags |= SLOT_FLAG;
+	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
@@ -1772,7 +1776,7 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
				deq_state->new_deq_ptr);
		return;
	}
-	ep_ctx->deq = addr | deq_state->new_cycle_state;
+	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
@@ -2327,8 +2331,8 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
	/* Enqueue pointer can be left pointing to the link TRB,
	 * we must handle that
	 */
-	if ((reset_device_cmd->command_trb->link.control & TRB_TYPE_BITMASK)
-			== TRB_TYPE(TRB_LINK))
+	if ((le32_to_cpu(reset_device_cmd->command_trb->link.control)
+	     & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
		reset_device_cmd->command_trb =
			xhci->cmd_ring->enq_seg->next->trbs;
@@ -2542,6 +2546,17 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
	virt_dev = xhci->devs[udev->slot_id];
+	if (WARN_ON(!virt_dev)) {
+		/*
+		 * In plug/unplug torture test with an NEC controller,
+		 * a zero-dereference was observed once due to virt_dev = 0.
+		 * Print useful debug rather than crash if it is observed again!
+		 */
+		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
+			udev->slot_id);
+		return -EINVAL;
+	}
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	/*
	 * If this is the first Set Address since device plug-in or
@@ -2609,10 +2624,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
-			xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
+			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
@@ -2626,7 +2641,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	/* Use kernel assigned address for devices; store xHC assigned
	 * address locally. */
-	virt_dev->address = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
+	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
+			+ 1;
	/* Zero the input context control for later use */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = 0;
@@ -2670,16 +2686,16 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
	spin_lock_irqsave(&xhci->lock, flags);
	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
-	ctrl_ctx->add_flags |= SLOT_FLAG;
+	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
-	slot_ctx->dev_info |= DEV_HUB;
+	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
-		slot_ctx->dev_info |= DEV_MTT;
+		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
-		slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
+		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
@@ -2687,7 +2703,7 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
-		slot_ctx->tt_info |= TT_THINK_TIME(think_time);
+		slot_ctx->tt_info |= cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
......
@@ -57,13 +57,13 @@
 * @run_regs_off: RTSOFF - Runtime register space offset
 */
struct xhci_cap_regs {
-	u32	hc_capbase;
-	u32	hcs_params1;
-	u32	hcs_params2;
-	u32	hcs_params3;
-	u32	hcc_params;
-	u32	db_off;
-	u32	run_regs_off;
+	__le32	hc_capbase;
+	__le32	hcs_params1;
+	__le32	hcs_params2;
+	__le32	hcs_params3;
+	__le32	hcc_params;
+	__le32	db_off;
+	__le32	run_regs_off;
	/* Reserved up to (CAPLENGTH - 0x1C) */
};
@@ -155,26 +155,26 @@ struct xhci_cap_regs {
 * devices.
 */
struct xhci_op_regs {
-	u32	command;
-	u32	status;
-	u32	page_size;
-	u32	reserved1;
-	u32	reserved2;
-	u32	dev_notification;
-	u64	cmd_ring;
+	__le32	command;
+	__le32	status;
+	__le32	page_size;
+	__le32	reserved1;
+	__le32	reserved2;
+	__le32	dev_notification;
+	__le64	cmd_ring;
	/* rsvd: offset 0x20-2F */
-	u32	reserved3[4];
-	u64	dcbaa_ptr;
-	u32	config_reg;
+	__le32	reserved3[4];
+	__le64	dcbaa_ptr;
+	__le32	config_reg;
	/* rsvd: offset 0x3C-3FF */
-	u32	reserved4[241];
+	__le32	reserved4[241];
	/* port 1 registers, which serve as a base address for other ports */
-	u32	port_status_base;
-	u32	port_power_base;
-	u32	port_link_base;
-	u32	reserved5;
+	__le32	port_status_base;
+	__le32	port_power_base;
+	__le32	port_link_base;
+	__le32	reserved5;
	/* registers for ports 2-255 */
-	u32	reserved6[NUM_PORT_REGS*254];
+	__le32	reserved6[NUM_PORT_REGS*254];
};
/* USBCMD - USB command - command bitmasks */
@@ -382,12 +382,12 @@ struct xhci_op_regs {
 * updates the dequeue pointer.
 */
struct xhci_intr_reg {
-	u32	irq_pending;
-	u32	irq_control;
-	u32	erst_size;
-	u32	rsvd;
-	u64	erst_base;
-	u64	erst_dequeue;
+	__le32	irq_pending;
+	__le32	irq_control;
+	__le32	erst_size;
+	__le32	rsvd;
+	__le64	erst_base;
+	__le64	erst_dequeue;
};
/* irq_pending bitmasks */
@@ -432,8 +432,8 @@ struct xhci_intr_reg {
 * or larger accesses"
 */
struct xhci_run_regs {
-	u32	microframe_index;
-	u32	rsvd[7];
+	__le32	microframe_index;
+	__le32	rsvd[7];
	struct xhci_intr_reg	ir_set[128];
};
@@ -447,7 +447,7 @@ struct xhci_run_regs {
 * Section 5.6
 */
struct xhci_doorbell_array {
-	u32	doorbell[256];
+	__le32	doorbell[256];
};
#define DB_VALUE(ep, stream)	((((ep) + 1) & 0xff) | ((stream) << 16))
@@ -504,12 +504,12 @@ struct xhci_container_ctx {
 * reserved at the end of the slot context for HC internal use.
 */
struct xhci_slot_ctx {
-	u32	dev_info;
-	u32	dev_info2;
-	u32	tt_info;
-	u32	dev_state;
+	__le32	dev_info;
+	__le32	dev_info2;
+	__le32	tt_info;
+	__le32	dev_state;
	/* offset 0x10 to 0x1f reserved for HC internal use */
-	u32	reserved[4];
+	__le32	reserved[4];
};
/* dev_info bitmasks */
@@ -580,12 +580,12 @@ struct xhci_slot_ctx {
 * reserved at the end of the endpoint context for HC internal use.
 */
struct xhci_ep_ctx {
-	u32	ep_info;
-	u32	ep_info2;
-	u64	deq;
-	u32	tx_info;
+	__le32	ep_info;
+	__le32	ep_info2;
+	__le64	deq;
+	__le32	tx_info;
	/* offset 0x14 - 0x1f reserved for HC internal use */
-	u32	reserved[3];
+	__le32	reserved[3];
};
/* ep_info bitmasks */
@@ -660,9 +660,9 @@ struct xhci_ep_ctx {
 * @add_context: set the bit of the endpoint context you want to enable
 */
struct xhci_input_control_ctx {
-	u32	drop_flags;
-	u32	add_flags;
-	u32	rsvd2[6];
+	__le32	drop_flags;
+	__le32	add_flags;
+	__le32	rsvd2[6];
};
/* Represents everything that is needed to issue a command on the command ring.
@@ -688,9 +688,9 @@ struct xhci_command {
struct xhci_stream_ctx {
	/* 64-bit stream ring address, cycle state, and stream type */
-	u64	stream_ring;
+	__le64	stream_ring;
	/* offset 0x14 - 0x1f reserved for HC internal use */
-	u32	reserved[2];
+	__le32	reserved[2];
};
/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
@@ -803,7 +803,7 @@ struct xhci_virt_device {
 */
struct xhci_device_context_array {
	/* 64-bit device addresses; we only write 32-bit addresses */
-	u64	dev_context_ptrs[MAX_HC_SLOTS];
+	__le64	dev_context_ptrs[MAX_HC_SLOTS];
	/* private xHCD pointers */
	dma_addr_t	dma;
};
@@ -816,10 +816,10 @@ struct xhci_device_context_array {
struct xhci_transfer_event {
	/* 64-bit buffer address, or immediate data */
-	u64	buffer;
-	u32	transfer_len;
+	__le64	buffer;
+	__le32	transfer_len;
	/* This field is interpreted differently based on the type of TRB */
-	u32	flags;
+	__le32	flags;
};
/** Transfer Event bit fields **/
@@ -898,9 +898,9 @@ struct xhci_transfer_event {
struct xhci_link_trb {
	/* 64-bit segment pointer*/
-	u64	segment_ptr;
-	u32	intr_target;
-	u32	control;
+	__le64	segment_ptr;
+	__le32	intr_target;
+	__le32	control;
};
/* control bitfields */
@@ -909,9 +909,9 @@ struct xhci_link_trb {
/* Command completion event TRB */
struct xhci_event_cmd {
	/* Pointer to command TRB, or the value passed by the event data trb */
-	u64	cmd_trb;
-	u32	status;
-	u32	flags;
+	__le64	cmd_trb;
+	__le32	status;
+	__le32	flags;
};
/* flags bitmasks */
@@ -943,6 +943,8 @@ struct xhci_event_cmd {
/* Interrupter Target - which MSI-X vector to target the completion event at */
#define TRB_INTR_TARGET(p)	(((p) & 0x3ff) << 22)
#define GET_INTR_TARGET(p)	(((p) >> 22) & 0x3ff)
+#define TRB_TBC(p)		(((p) & 0x3) << 7)
+#define TRB_TLBPC(p)		(((p) & 0xf) << 16)
/* Cycle bit - indicates TRB ownership by HC or HCD */
#define TRB_CYCLE		(1<<0)
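TRB_TBC() and TRB_TLBPC() carry the new xHCI 1.0 isochronous fields: Transfer Burst Count and Transfer Last Burst Packet Count, both encoded as value minus one. A hedged sketch of how the two are typically derived from a TD's packet count and the endpoint's burst size; the names and the helper itself are illustrative, while the driver's own computation lands with the xhci 1.0 patches in this merge:

	/* total_packets: max-packet-sized packets in the TD
	 * max_burst:     bMaxBurst from the SS endpoint companion descriptor
	 */
	static u32 isoc_burst_fields(unsigned int total_packets, unsigned int max_burst)
	{
		unsigned int bursts = DIV_ROUND_UP(total_packets, max_burst + 1);
		unsigned int residue = total_packets % (max_burst + 1);
		/* the last burst is full unless the division left a remainder */
		unsigned int last_burst_pkts = residue ? residue : max_burst + 1;

		return TRB_TBC(bursts - 1) | TRB_TLBPC(last_burst_pkts - 1);
	}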
@@ -970,7 +972,7 @@ struct xhci_event_cmd {
#define TRB_SIA			(1<<31)
struct xhci_generic_trb {
-	u32 field[4];
+	__le32 field[4];
};
union xhci_trb {
@@ -1118,10 +1120,10 @@ struct xhci_ring {
struct xhci_erst_entry {
	/* 64-bit event ring segment address */
-	u64	seg_addr;
-	u32	seg_size;
+	__le64	seg_addr;
+	__le32	seg_size;
	/* Set to zero */
-	u32	rsvd;
+	__le32	rsvd;
};
struct xhci_erst {
@@ -1286,10 +1288,10 @@ struct xhci_hcd {
	/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
	u8 *port_array;
	/* Array of pointers to USB 3.0 PORTSC registers */
-	u32 __iomem **usb3_ports;
+	__le32 __iomem **usb3_ports;
	unsigned int num_usb3_ports;
	/* Array of pointers to USB 2.0 PORTSC registers */
-	u32 __iomem **usb2_ports;
+	__le32 __iomem **usb2_ports;
	unsigned int num_usb2_ports;
};
@@ -1322,12 +1324,12 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
/* TODO: copied from ehci.h - can be refactored? */
/* xHCI spec says all registers are little endian */
static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
-		__u32 __iomem *regs)
+		__le32 __iomem *regs)
{
	return readl(regs);
}
static inline void xhci_writel(struct xhci_hcd *xhci,
-		const unsigned int val, __u32 __iomem *regs)
+		const unsigned int val, __le32 __iomem *regs)
{
	xhci_dbg(xhci,
			"`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
@@ -1345,7 +1347,7 @@ static inline void xhci_writel(struct xhci_hcd *xhci,
 * the high dword, and write order is irrelevant.
 */
static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
-		__u64 __iomem *regs)
+		__le64 __iomem *regs)
{
	__u32 __iomem *ptr = (__u32 __iomem *) regs;
	u64 val_lo = readl(ptr);
@@ -1353,7 +1355,7 @@ static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
	return val_lo + (val_hi << 32);
}
static inline void xhci_write_64(struct xhci_hcd *xhci,
-		const u64 val, __u64 __iomem *regs)
+		const u64 val, __le64 __iomem *regs)
{
	__u32 __iomem *ptr = (__u32 __iomem *) regs;
	u32 val_lo = lower_32_bits(val);
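Only the pointer types change in these accessors, because readl() and writel() already convert between little-endian bus order and CPU order; the __le32/__le64 annotations exist so sparse can prove that no raw, unconverted access slips through. The 64-bit registers are still touched as two 32-bit halves, and per the comment above the order of the halves does not matter. A self-contained sketch of the same idiom (the helper is illustrative, not driver code):

	/* Sketch: read a 64-bit MMIO register as two 32-bit halves.
	 * readl() handles the per-dword byte order for us.
	 */
	static u64 read_split_64(__le64 __iomem *reg)
	{
		__le32 __iomem *p = (__le32 __iomem *) reg;
		u64 lo = readl(p);	/* low dword */
		u64 hi = readl(p + 1);	/* high dword */

		return lo | (hi << 32);
	}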
......
@@ -579,7 +579,7 @@ struct usb_ss_ep_comp_descriptor {
	__u8  bMaxBurst;
	__u8  bmAttributes;
-	__u16 wBytesPerInterval;
+	__le16 wBytesPerInterval;
} __attribute__ ((packed));
#define USB_DT_SS_EP_COMP_SIZE		6
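Retyping wBytesPerInterval as __le16 lets sparse flag any use that forgets descriptors arrive in wire-format little-endian, which is what the usb/ch9 patch in this merge is about. An illustrative accessor showing the intended usage pattern:

	/* Return the periodic bandwidth a SuperSpeed endpoint requests,
	 * converting the wire-format field to CPU order first.
	 */
	static unsigned int ss_bytes_per_interval(const struct usb_ss_ep_comp_descriptor *comp)
	{
		return le16_to_cpu(comp->wBytesPerInterval);
	}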
......