Commit c9050b64 authored by Greg Kroah-Hartman

Merge tag 'for-usb-next-2014-03-06' of git://git.kernel.org/pub/scm/linux/kernel/git/sarah/xhci into usb-next

Sarah writes:

xhci: Streams and UAS cleanups, misc cleanups for 3.15

Hi Greg,

Here are 76 patches to queue to usb-next for 3.15.

The bulk of this rather large pull request is the UAS driver cleanup, the
xHCI streams fixes, and the new userspace API that lets usbfs allocate,
free, and use bulk streams.  I've hammered on these changes, and the UAS
driver seems solid.  The performance numbers are pretty spiffy too:

root@xanatos:~# echo 3 > /proc/sys/vm/drop_caches; dd if=/dev/sdb of=/dev/null bs=4k count=1000M iflag=count_bytes
256000+0 records in
256000+0 records out
1048576000 bytes (1.0 GB) copied, 3.28557 s, 319 MB/s

That's about 100 MB/s faster than my fastest Bulk-only-Transport mass
storage drive.

There are a couple of miscellaneous cleanup patches and non-urgent bug fixes
in here as well:

79699437 xhci: add the meaningful IRQ description if it is empty
bcffae77 xhci: Prevent runtime pm from autosuspending during initialization
e587b8b2 xhci: make warnings greppable
25cd2882 usb/xhci: Change how we indicate a host supports Link PM.

Sarah Sharp
parents 86e2864d 79699437
......@@ -9063,8 +9063,7 @@ S: Maintained
F: drivers/net/wireless/ath/ar5523/
USB ATTACHED SCSI
M: Matthew Wilcox <willy@linux.intel.com>
M: Sarah Sharp <sarah.a.sharp@linux.intel.com>
M: Hans de Goede <hdegoede@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
L: linux-usb@vger.kernel.org
L: linux-scsi@vger.kernel.org
......
......@@ -10,7 +10,6 @@
#define USB_MAXALTSETTING 128 /* Hard limit */
#define USB_MAXENDPOINTS 30 /* Hard limit */
#define USB_MAXCONFIG 8 /* Arbitrary limit */
......
......@@ -769,6 +769,88 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
return ret;
}
static struct usb_host_endpoint *ep_to_host_endpoint(struct usb_device *dev,
unsigned char ep)
{
if (ep & USB_ENDPOINT_DIR_MASK)
return dev->ep_in[ep & USB_ENDPOINT_NUMBER_MASK];
else
return dev->ep_out[ep & USB_ENDPOINT_NUMBER_MASK];
}
static int parse_usbdevfs_streams(struct dev_state *ps,
struct usbdevfs_streams __user *streams,
unsigned int *num_streams_ret,
unsigned int *num_eps_ret,
struct usb_host_endpoint ***eps_ret,
struct usb_interface **intf_ret)
{
unsigned int i, num_streams, num_eps;
struct usb_host_endpoint **eps;
struct usb_interface *intf = NULL;
unsigned char ep;
int ifnum, ret;
if (get_user(num_streams, &streams->num_streams) ||
get_user(num_eps, &streams->num_eps))
return -EFAULT;
if (num_eps < 1 || num_eps > USB_MAXENDPOINTS)
return -EINVAL;
/* The XHCI controller allows max 2 ^ 16 streams */
if (num_streams_ret && (num_streams < 2 || num_streams > 65536))
return -EINVAL;
eps = kmalloc(num_eps * sizeof(*eps), GFP_KERNEL);
if (!eps)
return -ENOMEM;
for (i = 0; i < num_eps; i++) {
if (get_user(ep, &streams->eps[i])) {
ret = -EFAULT;
goto error;
}
eps[i] = ep_to_host_endpoint(ps->dev, ep);
if (!eps[i]) {
ret = -EINVAL;
goto error;
}
/* usb_alloc/free_streams operate on a usb_interface */
ifnum = findintfep(ps->dev, ep);
if (ifnum < 0) {
ret = ifnum;
goto error;
}
if (i == 0) {
ret = checkintf(ps, ifnum);
if (ret < 0)
goto error;
intf = usb_ifnum_to_if(ps->dev, ifnum);
} else {
/* Verify all eps belong to the same interface */
if (ifnum != intf->altsetting->desc.bInterfaceNumber) {
ret = -EINVAL;
goto error;
}
}
}
if (num_streams_ret)
*num_streams_ret = num_streams;
*num_eps_ret = num_eps;
*eps_ret = eps;
*intf_ret = intf;
return 0;
error:
kfree(eps);
return ret;
}
static int match_devt(struct device *dev, void *data)
{
return dev->devt == (dev_t) (unsigned long) data;
......@@ -1143,6 +1225,9 @@ static int proc_setintf(struct dev_state *ps, void __user *arg)
return -EFAULT;
if ((ret = checkintf(ps, setintf.interface)))
return ret;
destroy_async_on_interface(ps, setintf.interface);
return usb_set_interface(ps->dev, setintf.interface,
setintf.altsetting);
}
......@@ -1205,6 +1290,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
int i, ret, is_in, num_sgs = 0, ifnum = -1;
int number_of_packets = 0;
unsigned int stream_id = 0;
void *buf;
if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
......@@ -1225,15 +1312,10 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
if (ret)
return ret;
}
if ((uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0) {
is_in = 1;
ep = ps->dev->ep_in[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
} else {
is_in = 0;
ep = ps->dev->ep_out[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
}
ep = ep_to_host_endpoint(ps->dev, uurb->endpoint);
if (!ep)
return -ENOENT;
is_in = (uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0;
u = 0;
switch(uurb->type) {
......@@ -1258,7 +1340,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
le16_to_cpup(&dr->wIndex));
if (ret)
goto error;
uurb->number_of_packets = 0;
uurb->buffer_length = le16_to_cpup(&dr->wLength);
uurb->buffer += 8;
if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) {
......@@ -1288,17 +1369,17 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
uurb->type = USBDEVFS_URB_TYPE_INTERRUPT;
goto interrupt_urb;
}
uurb->number_of_packets = 0;
num_sgs = DIV_ROUND_UP(uurb->buffer_length, USB_SG_SIZE);
if (num_sgs == 1 || num_sgs > ps->dev->bus->sg_tablesize)
num_sgs = 0;
if (ep->streams)
stream_id = uurb->stream_id;
break;
case USBDEVFS_URB_TYPE_INTERRUPT:
if (!usb_endpoint_xfer_int(&ep->desc))
return -EINVAL;
interrupt_urb:
uurb->number_of_packets = 0;
break;
case USBDEVFS_URB_TYPE_ISO:
......@@ -1308,15 +1389,16 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return -EINVAL;
if (!usb_endpoint_xfer_isoc(&ep->desc))
return -EINVAL;
number_of_packets = uurb->number_of_packets;
isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) *
uurb->number_of_packets;
number_of_packets;
if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL)))
return -ENOMEM;
if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
ret = -EFAULT;
goto error;
}
for (totlen = u = 0; u < uurb->number_of_packets; u++) {
for (totlen = u = 0; u < number_of_packets; u++) {
/*
* arbitrary limit needed for USB 3.0
* bMaxBurst (0~15 allowed, 1~16 packets)
......@@ -1347,7 +1429,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
ret = -EFAULT;
goto error;
}
as = alloc_async(uurb->number_of_packets);
as = alloc_async(number_of_packets);
if (!as) {
ret = -ENOMEM;
goto error;
......@@ -1441,7 +1523,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
as->urb->setup_packet = (unsigned char *)dr;
dr = NULL;
as->urb->start_frame = uurb->start_frame;
as->urb->number_of_packets = uurb->number_of_packets;
as->urb->number_of_packets = number_of_packets;
as->urb->stream_id = stream_id;
if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
ps->dev->speed == USB_SPEED_HIGH)
as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
......@@ -1449,7 +1532,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
as->urb->interval = ep->desc.bInterval;
as->urb->context = as;
as->urb->complete = async_completed;
for (totlen = u = 0; u < uurb->number_of_packets; u++) {
for (totlen = u = 0; u < number_of_packets; u++) {
as->urb->iso_frame_desc[u].offset = totlen;
as->urb->iso_frame_desc[u].length = isopkt[u].length;
totlen += isopkt[u].length;
......@@ -1999,6 +2082,45 @@ static int proc_disconnect_claim(struct dev_state *ps, void __user *arg)
return claimintf(ps, dc.interface);
}
static int proc_alloc_streams(struct dev_state *ps, void __user *arg)
{
unsigned num_streams, num_eps;
struct usb_host_endpoint **eps;
struct usb_interface *intf;
int r;
r = parse_usbdevfs_streams(ps, arg, &num_streams, &num_eps,
&eps, &intf);
if (r)
return r;
destroy_async_on_interface(ps,
intf->altsetting[0].desc.bInterfaceNumber);
r = usb_alloc_streams(intf, eps, num_eps, num_streams, GFP_KERNEL);
kfree(eps);
return r;
}
static int proc_free_streams(struct dev_state *ps, void __user *arg)
{
unsigned num_eps;
struct usb_host_endpoint **eps;
struct usb_interface *intf;
int r;
r = parse_usbdevfs_streams(ps, arg, NULL, &num_eps, &eps, &intf);
if (r)
return r;
destroy_async_on_interface(ps,
intf->altsetting[0].desc.bInterfaceNumber);
r = usb_free_streams(intf, eps, num_eps, GFP_KERNEL);
kfree(eps);
return r;
}
/*
* NOTE: All requests here that have interface numbers as parameters
* are assuming that somehow the configuration has been prevented from
......@@ -2175,6 +2297,12 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
case USBDEVFS_DISCONNECT_CLAIM:
ret = proc_disconnect_claim(ps, p);
break;
case USBDEVFS_ALLOC_STREAMS:
ret = proc_alloc_streams(ps, p);
break;
case USBDEVFS_FREE_STREAMS:
ret = proc_free_streams(ps, p);
break;
}
usb_unlock_device(dev);
if (ret >= 0)
......
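For reference, the USBDEVFS_ALLOC_STREAMS / USBDEVFS_FREE_STREAMS cases added above are driven from userspace roughly as in the following sketch. This is a minimal, illustrative example only: the device node path and endpoint addresses are placeholders, and it assumes the struct usbdevfs_streams layout and ioctl definitions added elsewhere in this series to include/uapi/linux/usbdevice_fs.h, which are not shown in this hunk.

/*
 * Minimal userspace sketch (illustrative, not part of this diff):
 * allocate bulk streams on two endpoints through usbfs, then free them.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

int main(void)
{
	int fd = open("/dev/bus/usb/001/002", O_RDWR);	/* placeholder path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* header plus two endpoint addresses */
	struct usbdevfs_streams *st = calloc(1, sizeof(*st) + 2);
	if (!st) {
		close(fd);
		return 1;
	}
	st->num_streams = 16;	/* must be between 2 and 65536 */
	st->num_eps = 2;
	st->eps[0] = 0x81;	/* bulk IN endpoint (placeholder) */
	st->eps[1] = 0x02;	/* bulk OUT endpoint (placeholder) */

	if (ioctl(fd, USBDEVFS_ALLOC_STREAMS, st) < 0)
		perror("USBDEVFS_ALLOC_STREAMS");
	else if (ioctl(fd, USBDEVFS_FREE_STREAMS, st) < 0)
		perror("USBDEVFS_FREE_STREAMS");

	free(st);
	close(fd);
	return 0;
}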
......@@ -400,8 +400,9 @@ static int usb_unbind_interface(struct device *dev)
{
struct usb_driver *driver = to_usb_driver(dev->driver);
struct usb_interface *intf = to_usb_interface(dev);
struct usb_host_endpoint *ep, **eps = NULL;
struct usb_device *udev;
int error, r, lpm_disable_error;
int i, j, error, r, lpm_disable_error;
intf->condition = USB_INTERFACE_UNBINDING;
......@@ -425,6 +426,26 @@ static int usb_unbind_interface(struct device *dev)
driver->disconnect(intf);
usb_cancel_queued_reset(intf);
/* Free streams */
for (i = 0, j = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
ep = &intf->cur_altsetting->endpoint[i];
if (ep->streams == 0)
continue;
if (j == 0) {
eps = kmalloc(USB_MAXENDPOINTS * sizeof(void *),
GFP_KERNEL);
if (!eps) {
dev_warn(dev, "oom, leaking streams\n");
break;
}
}
eps[j++] = ep;
}
if (j) {
usb_free_streams(intf, eps, j, GFP_KERNEL);
kfree(eps);
}
/* Reset other interface state.
* We cannot do a Set-Interface if the device is suspended or
* if it is prepared for a system sleep (since installing a new
......
......@@ -2049,7 +2049,7 @@ int usb_alloc_streams(struct usb_interface *interface,
{
struct usb_hcd *hcd;
struct usb_device *dev;
int i;
int i, ret;
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
......@@ -2058,13 +2058,24 @@ int usb_alloc_streams(struct usb_interface *interface,
if (dev->speed != USB_SPEED_SUPER)
return -EINVAL;
/* Streams only apply to bulk endpoints. */
for (i = 0; i < num_eps; i++)
for (i = 0; i < num_eps; i++) {
/* Streams only apply to bulk endpoints. */
if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
return -EINVAL;
/* Re-alloc is not allowed */
if (eps[i]->streams)
return -EINVAL;
}
return hcd->driver->alloc_streams(hcd, dev, eps, num_eps,
ret = hcd->driver->alloc_streams(hcd, dev, eps, num_eps,
num_streams, mem_flags);
if (ret < 0)
return ret;
for (i = 0; i < num_eps; i++)
eps[i]->streams = ret;
return ret;
}
EXPORT_SYMBOL_GPL(usb_alloc_streams);
......@@ -2078,8 +2089,7 @@ EXPORT_SYMBOL_GPL(usb_alloc_streams);
* Reverts a group of bulk endpoints back to not using stream IDs.
* Can fail if we are given bad arguments, or HCD is broken.
*
* Return: On success, the number of allocated streams. On failure, a negative
* error code.
* Return: 0 on success. On failure, a negative error code.
*/
int usb_free_streams(struct usb_interface *interface,
struct usb_host_endpoint **eps, unsigned int num_eps,
......@@ -2087,19 +2097,26 @@ int usb_free_streams(struct usb_interface *interface,
{
struct usb_hcd *hcd;
struct usb_device *dev;
int i;
int i, ret;
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
if (dev->speed != USB_SPEED_SUPER)
return -EINVAL;
/* Streams only apply to bulk endpoints. */
/* Double-free is not allowed */
for (i = 0; i < num_eps; i++)
if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc))
if (!eps[i] || !eps[i]->streams)
return -EINVAL;
return hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
ret = hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
if (ret < 0)
return ret;
for (i = 0; i < num_eps; i++)
eps[i]->streams = 0;
return ret;
}
EXPORT_SYMBOL_GPL(usb_free_streams);
......
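To put the new eps[i]->streams bookkeeping above in context, an in-kernel user such as the UAS driver pairs these helpers roughly as in the sketch below. This is illustrative only and is not taken from this series; the my_driver_* functions are hypothetical, and the eps[] array would normally be filled from the interface's current altsetting.

/*
 * Illustrative sketch (not part of this diff): pairing usb_alloc_streams()
 * with usb_free_streams() in a driver.
 */
#include <linux/usb.h>

static int my_driver_setup_streams(struct usb_interface *intf,
				   struct usb_host_endpoint **eps,
				   unsigned int num_eps)
{
	int num_streams;

	/* Ask for up to 256 stream IDs on the given bulk endpoints. */
	num_streams = usb_alloc_streams(intf, eps, num_eps, 256, GFP_KERNEL);
	if (num_streams < 0)
		return num_streams;	/* error from the core or the HCD */

	/*
	 * With this series each eps[i]->streams now records the grant, so
	 * usb_unbind_interface() can free anything the driver leaks.
	 */
	return num_streams;		/* number of stream IDs granted */
}

static void my_driver_teardown_streams(struct usb_interface *intf,
				       struct usb_host_endpoint **eps,
				       unsigned int num_eps)
{
	/* Returns 0 on success and clears eps[i]->streams again. */
	usb_free_streams(intf, eps, num_eps, GFP_KERNEL);
}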
......@@ -141,19 +141,27 @@ static int usb_device_supports_lpm(struct usb_device *udev)
return 0;
}
/* All USB 3.0 must support LPM, but we need their max exit latency
* information from the SuperSpeed Extended Capabilities BOS descriptor.
/*
* According to the USB 3.0 spec, all USB 3.0 devices must support LPM.
* However, there are some that don't, and they set the U1/U2 exit
* latencies to zero.
*/
if (!udev->bos->ss_cap) {
dev_warn(&udev->dev, "No LPM exit latency info found. "
"Power management will be impacted.\n");
dev_info(&udev->dev, "No LPM exit latency info found, disabling LPM.\n");
return 0;
}
if (udev->bos->ss_cap->bU1devExitLat == 0 &&
udev->bos->ss_cap->bU2DevExitLat == 0) {
if (udev->parent)
dev_info(&udev->dev, "LPM exit latency is zeroed, disabling LPM.\n");
else
dev_info(&udev->dev, "We don't know the algorithms for LPM for this host, disabling LPM.\n");
return 0;
}
if (udev->parent->lpm_capable)
return 1;
dev_warn(&udev->dev, "Parent hub missing LPM exit latency info. "
"Power management will be impacted.\n");
if (!udev->parent || udev->parent->lpm_capable)
return 1;
return 0;
}
......@@ -4750,6 +4758,8 @@ static void hub_events(void)
/* deal with port status changes */
for (i = 1; i <= hdev->maxchild; i++) {
struct usb_device *udev = hub->ports[i - 1]->child;
if (test_bit(i, hub->busy_bits))
continue;
connect_change = test_bit(i, hub->change_bits);
......@@ -4848,8 +4858,6 @@ static void hub_events(void)
*/
if (hub_port_warm_reset_required(hub, portstatus)) {
int status;
struct usb_device *udev =
hub->ports[i - 1]->child;
dev_dbg(hub_dev, "warm reset port %d\n", i);
if (!udev ||
......@@ -4866,6 +4874,24 @@ static void hub_events(void)
usb_unlock_device(udev);
connect_change = 0;
}
/*
* On disconnect USB3 protocol ports transit from U0 to
* SS.Inactive to Rx.Detect. If this happens a warm-
* reset is not needed, but a (re)connect may happen
* before khubd runs and sees the disconnect, and the
* device may be in an unknown state.
*
* If the port went through SS.Inactive without khubd
* seeing it, the C_LINK_STATE change flag will be set,
* and we reset the dev to put it in a known state.
*/
} else if (udev && hub_is_superspeed(hub->hdev) &&
(portchange & USB_PORT_STAT_C_LINK_STATE) &&
(portstatus & USB_PORT_STAT_CONNECTION)) {
usb_lock_device(udev);
usb_reset_device(udev);
usb_unlock_device(udev);
connect_change = 0;
}
if (connect_change)
......@@ -5123,7 +5149,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
struct usb_device_descriptor descriptor = udev->descriptor;
struct usb_host_bos *bos;
int i, ret = 0;
int i, j, ret = 0;
int port1 = udev->portnum;
if (udev->state == USB_STATE_NOTATTACHED ||
......@@ -5249,6 +5275,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
ret);
goto re_enumerate;
}
/* Resetting also frees any allocated streams */
for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++)
intf->cur_altsetting->endpoint[j].streams = 0;
}
done:
......
......@@ -1293,8 +1293,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
struct usb_interface *iface;
struct usb_host_interface *alt;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
int ret;
int manual = 0;
int i, ret, manual = 0;
unsigned int epaddr;
unsigned int pipe;
......@@ -1329,6 +1328,10 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
/* Changing alt-setting also frees any allocated streams */
for (i = 0; i < iface->cur_altsetting->desc.bNumEndpoints; i++)
iface->cur_altsetting->endpoint[i].streams = 0;
ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
if (ret < 0) {
dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",
......
......@@ -732,9 +732,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* Set the U1 and U2 exit latencies. */
memcpy(buf, &usb_bos_descriptor,
USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE);
temp = readl(&xhci->cap_regs->hcs_params3);
buf[12] = HCS_U1_LATENCY(temp);
put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
if ((xhci->quirks & XHCI_LPM_SUPPORT)) {
temp = readl(&xhci->cap_regs->hcs_params3);
buf[12] = HCS_U1_LATENCY(temp);
put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
}
/* Indicate whether the host has LTM support. */
temp = readl(&xhci->cap_regs->hcc_params);
......
......@@ -149,14 +149,140 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
}
}
/*
* We need a radix tree for mapping physical addresses of TRBs to which stream
* ID they belong to. We need to do this because the host controller won't tell
* us which stream ring the TRB came from. We could store the stream ID in an
* event data TRB, but that doesn't help us for the cancellation case, since the
* endpoint may stop before it reaches that event data TRB.
*
* The radix tree maps the upper portion of the TRB DMA address to a ring
* segment that has the same upper portion of DMA addresses. For example, say I
* have segments of size 1KB, that are always 1KB aligned. A segment may
* start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
* key to the stream ID is 0x43244. I can use the DMA address of the TRB to
* pass the radix tree a key to get the right stream ID:
*
* 0x10c90fff >> 10 = 0x43243
* 0x10c912c0 >> 10 = 0x43244
* 0x10c91400 >> 10 = 0x43245
*
* Obviously, only those TRBs with DMA addresses that are within the segment
* will make the radix tree return the stream ID for that ring.
*
* Caveats for the radix tree:
*
* The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
* unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
* 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
* key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
* PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
* extended systems (where the DMA address can be bigger than 32-bits),
* if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
*/
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
struct xhci_ring *ring,
struct xhci_segment *seg,
gfp_t mem_flags)
{
unsigned long key;
int ret;
key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
/* Skip any segments that were already added. */
if (radix_tree_lookup(trb_address_map, key))
return 0;
ret = radix_tree_maybe_preload(mem_flags);
if (ret)
return ret;
ret = radix_tree_insert(trb_address_map,
key, ring);
radix_tree_preload_end();
return ret;
}
static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
struct xhci_segment *seg)
{
unsigned long key;
key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
if (radix_tree_lookup(trb_address_map, key))
radix_tree_delete(trb_address_map, key);
}
static int xhci_update_stream_segment_mapping(
struct radix_tree_root *trb_address_map,
struct xhci_ring *ring,
struct xhci_segment *first_seg,
struct xhci_segment *last_seg,
gfp_t mem_flags)
{
struct xhci_segment *seg;
struct xhci_segment *failed_seg;
int ret;
if (WARN_ON_ONCE(trb_address_map == NULL))
return 0;
seg = first_seg;
do {
ret = xhci_insert_segment_mapping(trb_address_map,
ring, seg, mem_flags);
if (ret)
goto remove_streams;
if (seg == last_seg)
return 0;
seg = seg->next;
} while (seg != first_seg);
return 0;
remove_streams:
failed_seg = seg;
seg = first_seg;
do {
xhci_remove_segment_mapping(trb_address_map, seg);
if (seg == failed_seg)
return ret;
seg = seg->next;
} while (seg != first_seg);
return ret;
}
static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
struct xhci_segment *seg;
if (WARN_ON_ONCE(ring->trb_address_map == NULL))
return;
seg = ring->first_seg;
do {
xhci_remove_segment_mapping(ring->trb_address_map, seg);
seg = seg->next;
} while (seg != ring->first_seg);
}
static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
ring->first_seg, ring->last_seg, mem_flags);
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
if (!ring)
return;
if (ring->first_seg)
if (ring->first_seg) {
if (ring->type == TYPE_STREAM)
xhci_remove_stream_mapping(ring);
xhci_free_segments_for_ring(xhci, ring->first_seg);
}
kfree(ring);
}
......@@ -349,6 +475,21 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
if (ret)
return -ENOMEM;
if (ring->type == TYPE_STREAM)
ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
ring, first, last, flags);
if (ret) {
struct xhci_segment *next;
do {
next = first->next;
xhci_segment_free(xhci, first);
if (first == last)
break;
first = next;
} while (true);
return ret;
}
xhci_link_rings(xhci, ring, first, last, num_segs);
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ring expansion succeed, now has %d segments",
......@@ -434,12 +575,12 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
dma_free_coherent(dev,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
if (size > MEDIUM_STREAM_ARRAY_SIZE)
dma_free_coherent(dev, size,
stream_ctx, dma);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
else if (size <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_free(xhci->small_streams_pool,
stream_ctx, dma);
else
......@@ -462,12 +603,12 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
gfp_t mem_flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
return dma_alloc_coherent(dev,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
if (size > MEDIUM_STREAM_ARRAY_SIZE)
return dma_alloc_coherent(dev, size,
dma, mem_flags);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
else if (size <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_alloc(xhci->small_streams_pool,
mem_flags, dma);
else
......@@ -510,36 +651,6 @@ struct xhci_ring *xhci_stream_id_to_ring(
* The number of stream contexts in the stream context array may be bigger than
* the number of streams the driver wants to use. This is because the number of
* stream context array entries must be a power of two.
*
* We need a radix tree for mapping physical addresses of TRBs to which stream
* ID they belong to. We need to do this because the host controller won't tell
* us which stream ring the TRB came from. We could store the stream ID in an
* event data TRB, but that doesn't help us for the cancellation case, since the
* endpoint may stop before it reaches that event data TRB.
*
* The radix tree maps the upper portion of the TRB DMA address to a ring
* segment that has the same upper portion of DMA addresses. For example, say I
* have segments of size 1KB, that are always 64-byte aligned. A segment may
* start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
* key to the stream ID is 0x43244. I can use the DMA address of the TRB to
* pass the radix tree a key to get the right stream ID:
*
* 0x10c90fff >> 10 = 0x43243
* 0x10c912c0 >> 10 = 0x43244
* 0x10c91400 >> 10 = 0x43245
*
* Obviously, only those TRBs with DMA addresses that are within the segment
* will make the radix tree return the stream ID for that ring.
*
* Caveats for the radix tree:
*
* The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
* unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
* 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
* key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
* PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
* extended systems (where the DMA address can be bigger than 32-bits),
* if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
*/
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
......@@ -548,7 +659,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
struct xhci_stream_info *stream_info;
u32 cur_stream;
struct xhci_ring *cur_ring;
unsigned long key;
u64 addr;
int ret;
......@@ -603,6 +713,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
if (!cur_ring)
goto cleanup_rings;
cur_ring->stream_id = cur_stream;
cur_ring->trb_address_map = &stream_info->trb_address_map;
/* Set deq ptr, cycle bit, and stream context type */
addr = cur_ring->first_seg->dma |
SCT_FOR_CTX(SCT_PRI_TR) |
......@@ -612,10 +723,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
cur_stream, (unsigned long long) addr);
key = (unsigned long)
(cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
ret = radix_tree_insert(&stream_info->trb_address_map,
key, cur_ring);
ret = xhci_update_stream_mapping(cur_ring, mem_flags);
if (ret) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
......@@ -635,9 +743,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
addr = cur_ring->first_seg->dma;
radix_tree_delete(&stream_info->trb_address_map,
addr >> TRB_SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
......@@ -698,7 +803,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
{
int cur_stream;
struct xhci_ring *cur_ring;
dma_addr_t addr;
if (!stream_info)
return;
......@@ -707,9 +811,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
addr = cur_ring->first_seg->dma;
radix_tree_delete(&stream_info->trb_address_map,
addr >> TRB_SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
......@@ -1711,7 +1812,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
if (xhci->lpm_command)
xhci_free_command(xhci, xhci->lpm_command);
xhci->cmd_ring_reserved_trbs = 0;
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
......@@ -1776,6 +1876,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
no_bw:
xhci->cmd_ring_reserved_trbs = 0;
xhci->num_usb2_ports = 0;
xhci->num_usb3_ports = 0;
xhci->num_active_eps = 0;
......@@ -2274,11 +2375,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/*
* Initialize the ring segment pool. The ring must be a contiguous
* structure comprised of TRBs. The TRBs must be 16 byte aligned,
* however, the command ring segment needs 64-byte aligned segments,
* so we pick the greater alignment need.
* however, the command ring segment needs 64-byte aligned segments
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE, 64, xhci->page_size);
TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
/* See Table 46 and Note on Figure 55 */
xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
......
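The alignment change above matters because the stream-ring radix tree keys are derived by shifting the TRB DMA address down by TRB_SEGMENT_SHIFT, as described in the block comment earlier in this file. Below is a standalone sketch of that key calculation, reproducing the worked examples from the comment; a shift of 10 is assumed, i.e. log2 of the 1 KiB TRB_SEGMENT_SIZE.

/*
 * Standalone sketch (not part of this diff): how a TRB DMA address maps to
 * a trb_address_map radix tree key when segments are TRB_SEGMENT_SIZE
 * (1 KiB) aligned.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int trb_segment_shift = 10;
	const unsigned long dma[] = { 0x10c90fffUL, 0x10c912c0UL, 0x10c91400UL };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("0x%08lx >> %u = 0x%lx\n",
		       dma[i], trb_segment_shift, dma[i] >> trb_segment_shift);
	/* Prints 0x43243, 0x43244, 0x43245: only the middle address falls in
	 * the segment starting at 0x10c91000, matching the comment above. */
	return 0;
}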
......@@ -190,6 +190,10 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct usb_hcd *hcd;
driver = (struct hc_driver *)id->driver_data;
/* Prevent runtime suspending between USB-2 and USB-3 initialization */
pm_runtime_get_noresume(&dev->dev);
/* Register the USB 2.0 roothub.
* FIXME: USB core must know to register the USB 2.0 roothub first.
* This is sort of silly, because we could just set the HCD driver flags
......@@ -199,7 +203,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
retval = usb_hcd_pci_probe(dev, id);
if (retval)
return retval;
goto put_runtime_pm;
/* USB 2.0 roothub is stored in the PCI device now. */
hcd = dev_get_drvdata(&dev->dev);
......@@ -222,11 +226,11 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto put_usb3_hcd;
/* Roothub already marked as USB 3.0 speed */
/* We know the LPM timeout algorithms for this host, let the USB core
* enable and disable LPM for devices under the USB 3.0 roothub.
*/
if (xhci->quirks & XHCI_LPM_SUPPORT)
hcd_to_bus(xhci->shared_hcd)->root_hub->lpm_capable = 1;
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
pm_runtime_put_noidle(&dev->dev);
return 0;
......@@ -234,6 +238,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
usb_put_hcd(xhci->shared_hcd);
dealloc_usb2_hcd:
usb_hcd_pci_remove(dev);
put_runtime_pm:
pm_runtime_put_noidle(&dev->dev);
return retval;
}
......
......@@ -158,6 +158,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
*/
*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
......
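The HCC_MAX_PSA(xhci->hcc_params) >= 4 checks here and in xhci-pci.c come from the HCCPARAMS MaxPSASize field: the xHCI spec defines the primary stream array size as 2^(MaxPSASize+1) entries, so a raw value of 0 leaves room for only 2 stream contexts (effectively no usable streams, as the xhci.c hunk below notes), while a value of at least 4 guarantees 32. A standalone sketch of that relationship, not part of the patch:

/*
 * Standalone sketch (not part of this diff): the MaxPSASize field from
 * HCCPARAMS encodes the primary stream array size as a power of two.
 */
#include <stdio.h>

static unsigned int primary_stream_array_size(unsigned int max_psa_size)
{
	/* xHCI spec: array size = 2^(MaxPSASize + 1) entries */
	return 1u << (max_psa_size + 1);
}

int main(void)
{
	printf("MaxPSASize 0 -> %u contexts (streams unusable)\n",
	       primary_stream_array_size(0));
	printf("MaxPSASize 4 -> %u contexts (can_do_streams threshold)\n",
	       primary_stream_array_size(4));
	return 0;
}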
......@@ -546,9 +546,9 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
struct xhci_virt_ep *ep = &dev->eps[ep_index];
struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
......@@ -573,8 +573,16 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Finding endpoint context");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
/* 4.6.9 the css flag is written to the stream context for streams */
if (ep->ep_state & EP_HAS_STREAMS) {
struct xhci_stream_ctx *ctx =
&ep->stream_info->stream_ctx_array[stream_id];
state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring);
} else {
struct xhci_ep_ctx *ep_ctx
= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
}
state->new_deq_ptr = cur_td->last_trb;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
......@@ -892,6 +900,57 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
/* Return to the event handler with xhci->lock re-acquired */
}
static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
struct xhci_td *cur_td;
while (!list_empty(&ring->td_list)) {
cur_td = list_first_entry(&ring->td_list,
struct xhci_td, td_list);
list_del_init(&cur_td->td_list);
if (!list_empty(&cur_td->cancelled_td_list))
list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
int slot_id, int ep_index)
{
struct xhci_td *cur_td;
struct xhci_virt_ep *ep;
struct xhci_ring *ring;
ep = &xhci->devs[slot_id]->eps[ep_index];
if ((ep->ep_state & EP_HAS_STREAMS) ||
(ep->ep_state & EP_GETTING_NO_STREAMS)) {
int stream_id;
for (stream_id = 0; stream_id < ep->stream_info->num_streams;
stream_id++) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, ep index %u, stream %u",
slot_id, ep_index, stream_id + 1);
xhci_kill_ring_urbs(xhci,
ep->stream_info->stream_rings[stream_id]);
}
} else {
ring = ep->ring;
if (!ring)
return;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, ep index %u",
slot_id, ep_index);
xhci_kill_ring_urbs(xhci, ring);
}
while (!list_empty(&ep->cancelled_td_list)) {
cur_td = list_first_entry(&ep->cancelled_td_list,
struct xhci_td, cancelled_td_list);
list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
/* Watchdog timer function for when a stop endpoint command fails to complete.
* In this case, we assume the host controller is broken or dying or dead. The
* host may still be completing some other events, so we have to be careful to
......@@ -915,9 +974,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
struct xhci_hcd *xhci;
struct xhci_virt_ep *ep;
struct xhci_virt_ep *temp_ep;
struct xhci_ring *ring;
struct xhci_td *cur_td;
int ret, i, j;
unsigned long flags;
......@@ -974,34 +1030,8 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
for (i = 0; i < MAX_HC_SLOTS; i++) {
if (!xhci->devs[i])
continue;
for (j = 0; j < 31; j++) {
temp_ep = &xhci->devs[i]->eps[j];
ring = temp_ep->ring;
if (!ring)
continue;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, "
"ep index %u", i, j);
while (!list_empty(&ring->td_list)) {
cur_td = list_first_entry(&ring->td_list,
struct xhci_td,
td_list);
list_del_init(&cur_td->td_list);
if (!list_empty(&cur_td->cancelled_td_list))
list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td,
-ESHUTDOWN);
}
while (!list_empty(&temp_ep->cancelled_td_list)) {
cur_td = list_first_entry(
&temp_ep->cancelled_td_list,
struct xhci_td,
cancelled_td_list);
list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td,
-ESHUTDOWN);
}
}
for (j = 0; j < 31; j++)
xhci_kill_endpoint_urbs(xhci, i, j);
}
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
......@@ -1073,17 +1103,18 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
unsigned int stream_id;
struct xhci_ring *ep_ring;
struct xhci_virt_device *dev;
struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
dev = xhci->devs[slot_id];
ep = &dev->eps[ep_index];
ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
if (!ep_ring) {
xhci_warn(xhci, "WARN Set TR deq ptr command for "
"freed stream ID %u\n",
xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
stream_id);
/* XXX: Harmless??? */
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
......@@ -1099,12 +1130,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
switch (cmd_comp_code) {
case COMP_TRB_ERR:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
"of stream ID configuration\n");
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
break;
case COMP_CTX_STATE:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
"to incorrect slot or ep state.\n");
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
ep_state = le32_to_cpu(ep_ctx->ep_info);
ep_state &= EP_STATE_MASK;
slot_state = le32_to_cpu(slot_ctx->dev_state);
......@@ -1114,13 +1143,12 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
slot_state, ep_state);
break;
case COMP_EBADSLT:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
"slot %u was not enabled.\n", slot_id);
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
slot_id);
break;
default:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
"completion code of %u.\n",
cmd_comp_code);
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
cmd_comp_code);
break;
}
/* OK what do we do now? The endpoint state is hosed, and we
......@@ -1130,23 +1158,28 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
* cancelling URBs, which might not be an error...
*/
} else {
u64 deq;
/* 4.6.10 deq ptr is written to the stream ctx for streams */
if (ep->ep_state & EP_HAS_STREAMS) {
struct xhci_stream_ctx *ctx =
&ep->stream_info->stream_ctx_array[stream_id];
deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
} else {
deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Successful Set TR Deq Ptr cmd, deq = @%08llx",
le64_to_cpu(ep_ctx->deq));
if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
dev->eps[ep_index].queued_deq_ptr) ==
(le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
ep->queued_deq_ptr) == deq) {
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
update_ring_for_set_deq_completion(xhci, dev,
ep_ring, ep_index);
} else {
xhci_warn(xhci, "Mismatch between completed Set TR Deq "
"Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
dev->eps[ep_index].queued_deq_seg,
dev->eps[ep_index].queued_deq_ptr);
ep->queued_deq_seg, ep->queued_deq_ptr);
}
}
......@@ -4070,6 +4103,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
u32 trb_sct = 0;
u32 type = TRB_TYPE(TRB_SET_DEQ);
struct xhci_virt_ep *ep;
......@@ -4088,7 +4122,9 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
}
ep->queued_deq_seg = deq_seg;
ep->queued_deq_ptr = deq_ptr;
return queue_command(xhci, lower_32_bits(addr) | cycle_state,
if (stream_id)
trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
return queue_command(xhci, lower_32_bits(addr) | trb_sct | cycle_state,
upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
}
......
......@@ -390,6 +390,10 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
}
legacy_irq:
if (!strlen(hcd->irq_descr))
snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
hcd->driver->description, hcd->self.busnum);
/* fall back to legacy interrupt*/
ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
hcd->irq_descr, hcd);
......@@ -2678,6 +2682,20 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
return ret;
}
static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
struct xhci_virt_device *vdev, int i)
{
struct xhci_virt_ep *ep = &vdev->eps[i];
if (ep->ep_state & EP_HAS_STREAMS) {
xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
xhci_get_endpoint_address(i));
xhci_free_stream_info(xhci, ep->stream_info);
ep->stream_info = NULL;
ep->ep_state &= ~EP_HAS_STREAMS;
}
}
/* Called after one or more calls to xhci_add_endpoint() or
* xhci_drop_endpoint(). If this call fails, the USB core is expected
* to call xhci_reset_bandwidth().
......@@ -2742,8 +2760,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
/* Free any rings that were dropped, but not changed. */
for (i = 1; i < 31; ++i) {
if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
!(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
!(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
}
}
xhci_zero_in_ctx(xhci, virt_dev);
/*
......@@ -2759,6 +2779,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (virt_dev->eps[i].ring) {
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
}
xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->eps[i].new_ring = NULL;
}
......@@ -2954,7 +2975,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
if (ret <= 0)
return -EINVAL;
if (ep->ss_ep_comp.bmAttributes == 0) {
if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
" descriptor for ep 0x%x does not support streams\n",
ep->desc.bEndpointAddress);
......@@ -3121,6 +3142,12 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
num_streams);
/* MaxPSASize value 0 (2 streams) means streams are not supported */
if (HCC_MAX_PSA(xhci->hcc_params) < 4) {
xhci_dbg(xhci, "xHCI controller does not support streams.\n");
return -ENOSYS;
}
config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
if (!config_cmd) {
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
......@@ -3519,6 +3546,8 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_virt_ep *ep = &virt_dev->eps[i];
if (ep->ep_state & EP_HAS_STREAMS) {
xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
xhci_get_endpoint_address(i));
xhci_free_stream_info(xhci, ep->stream_info);
ep->stream_info = NULL;
ep->ep_state &= ~EP_HAS_STREAMS;
......
......@@ -703,6 +703,7 @@ struct xhci_ep_ctx {
/* deq bitmasks */
#define EP_CTX_CYCLE_MASK (1 << 0)
#define SCTX_DEQ_MASK (~0xfL)
/**
......@@ -1118,9 +1119,10 @@ enum xhci_setup_dev {
#define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23)
#define LAST_EP_INDEX 30
/* Set TR Dequeue Pointer command TRB fields */
/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
/* Port Status Change Event TRB fields */
......@@ -1341,6 +1343,7 @@ struct xhci_ring {
unsigned int num_trbs_free_temp;
enum xhci_ring_type type;
bool last_td_was_short;
struct radix_tree_root *trb_address_map;
};
struct xhci_erst_entry {
......
......@@ -204,7 +204,7 @@ config USB_STORAGE_ENE_UB6250
config USB_UAS
tristate "USB Attached SCSI"
depends on SCSI && BROKEN
depends on SCSI && USB_STORAGE
help
The USB Attached SCSI protocol is supported by some USB
storage devices. It permits higher performance by supporting
......
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "usb.h"
static int uas_is_interface(struct usb_host_interface *intf)
{
return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE &&
intf->desc.bInterfaceSubClass == USB_SC_SCSI &&
intf->desc.bInterfaceProtocol == USB_PR_UAS);
}
static int uas_isnt_supported(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
dev_warn(&udev->dev, "The driver for the USB controller %s does not "
"support scatter-gather which is\n",
hcd->driver->description);
dev_warn(&udev->dev, "required by the UAS driver. Please try an"
"alternative USB controller if you wish to use UAS.\n");
return -ENODEV;
}
static int uas_find_uas_alt_setting(struct usb_interface *intf)
{
int i;
struct usb_device *udev = interface_to_usbdev(intf);
int sg_supported = udev->bus->sg_tablesize != 0;
for (i = 0; i < intf->num_altsetting; i++) {
struct usb_host_interface *alt = &intf->altsetting[i];
if (uas_is_interface(alt)) {
if (!sg_supported)
return uas_isnt_supported(udev);
return alt->desc.bAlternateSetting;
}
}
return -ENODEV;
}
static int uas_find_endpoints(struct usb_host_interface *alt,
struct usb_host_endpoint *eps[])
{
struct usb_host_endpoint *endpoint = alt->endpoint;
unsigned i, n_endpoints = alt->desc.bNumEndpoints;
for (i = 0; i < n_endpoints; i++) {
unsigned char *extra = endpoint[i].extra;
int len = endpoint[i].extralen;
while (len >= 3) {
if (extra[1] == USB_DT_PIPE_USAGE) {
unsigned pipe_id = extra[2];
if (pipe_id > 0 && pipe_id < 5)
eps[pipe_id - 1] = &endpoint[i];
break;
}
len -= extra[0];
extra += extra[0];
}
}
if (!eps[0] || !eps[1] || !eps[2] || !eps[3])
return -ENODEV;
return 0;
}
static int uas_use_uas_driver(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_host_endpoint *eps[4] = { };
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
unsigned long flags = id->driver_info;
int r, alt;
usb_stor_adjust_quirks(udev, &flags);
if (flags & US_FL_IGNORE_UAS)
return 0;
if (udev->speed >= USB_SPEED_SUPER && !hcd->can_do_streams)
return 0;
alt = uas_find_uas_alt_setting(intf);
if (alt < 0)
return 0;
r = uas_find_endpoints(&intf->altsetting[alt], eps);
if (r < 0)
return 0;
return 1;
}
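For reference, the extra[] bytes that uas_find_endpoints() walks above are the class-specific descriptors attached to each endpoint; the pipe usage descriptor it matches puts bLength in extra[0], the descriptor type in extra[1] and the pipe ID in extra[2], with IDs 1 through 4 meaning the command, status, data-in and data-out pipes respectively. A sketch of that layout follows; the struct name is illustrative, and the kernel's own definition lives in <linux/usb/uas.h>.

/*
 * Illustrative layout sketch (not part of this diff) of the UAS pipe usage
 * descriptor matched by uas_find_endpoints().
 */
struct pipe_usage_descriptor_sketch {
	unsigned char bLength;		/* extra[0]: descriptor length      */
	unsigned char bDescriptorType;	/* extra[1]: USB_DT_PIPE_USAGE      */
	unsigned char bPipeID;		/* extra[2]: 1 = command, 2 = status,
					 *           3 = data-in, 4 = data-out */
	unsigned char Reserved;
};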
......@@ -2,6 +2,7 @@
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
* Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*
......@@ -13,17 +14,21 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/usb/hcd.h>
#include <linux/usb/storage.h>
#include <linux/usb/uas.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "uas-detect.h"
/*
* The r00-r01c specs define this version of the SENSE IU data structure.
* It's still in use by several different firmware releases.
......@@ -45,12 +50,17 @@ struct uas_dev_info {
struct usb_anchor sense_urbs;
struct usb_anchor data_urbs;
int qdepth, resetting;
struct response_ui response;
struct response_iu response;
unsigned cmd_pipe, status_pipe, data_in_pipe, data_out_pipe;
unsigned use_streams:1;
unsigned uas_sense_old:1;
unsigned running_task:1;
unsigned shutdown:1;
struct scsi_cmnd *cmnd;
spinlock_t lock;
struct work_struct work;
struct list_head inflight_list;
struct list_head dead_list;
};
enum {
......@@ -85,103 +95,117 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo, gfp_t gfp);
static void uas_do_work(struct work_struct *work);
static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller);
static void uas_free_streams(struct uas_dev_info *devinfo);
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *caller);
static DECLARE_WORK(uas_work, uas_do_work);
static DEFINE_SPINLOCK(uas_work_lock);
static LIST_HEAD(uas_work_list);
/* Must be called with devinfo->lock held, will temporarily unlock the lock */
static void uas_unlink_data_urbs(struct uas_dev_info *devinfo,
struct uas_cmd_info *cmdinfo)
struct uas_cmd_info *cmdinfo,
unsigned long *lock_flags)
{
unsigned long flags;
/*
* The UNLINK_DATA_URBS flag makes sure uas_try_complete
* (called by urb completion) doesn't release cmdinfo
* underneath us.
*/
spin_lock_irqsave(&devinfo->lock, flags);
cmdinfo->state |= UNLINK_DATA_URBS;
spin_unlock_irqrestore(&devinfo->lock, flags);
spin_unlock_irqrestore(&devinfo->lock, *lock_flags);
if (cmdinfo->data_in_urb)
usb_unlink_urb(cmdinfo->data_in_urb);
if (cmdinfo->data_out_urb)
usb_unlink_urb(cmdinfo->data_out_urb);
spin_lock_irqsave(&devinfo->lock, flags);
spin_lock_irqsave(&devinfo->lock, *lock_flags);
cmdinfo->state &= ~UNLINK_DATA_URBS;
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_do_work(struct work_struct *work)
{
struct uas_dev_info *devinfo =
container_of(work, struct uas_dev_info, work);
struct uas_cmd_info *cmdinfo;
struct uas_cmd_info *temp;
struct list_head list;
unsigned long flags;
int err;
spin_lock_irq(&uas_work_lock);
list_replace_init(&uas_work_list, &list);
spin_unlock_irq(&uas_work_lock);
list_for_each_entry_safe(cmdinfo, temp, &list, list) {
spin_lock_irqsave(&devinfo->lock, flags);
list_for_each_entry(cmdinfo, &devinfo->inflight_list, list) {
struct scsi_pointer *scp = (void *)cmdinfo;
struct scsi_cmnd *cmnd = container_of(scp,
struct scsi_cmnd, SCp);
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
spin_lock_irqsave(&devinfo->lock, flags);
err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd,
SCp);
if (!(cmdinfo->state & IS_IN_WORK_LIST))
continue;
err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_NOIO);
if (!err)
cmdinfo->state &= ~IS_IN_WORK_LIST;
spin_unlock_irqrestore(&devinfo->lock, flags);
if (err) {
list_del(&cmdinfo->list);
spin_lock_irq(&uas_work_lock);
list_add_tail(&cmdinfo->list, &uas_work_list);
spin_unlock_irq(&uas_work_lock);
schedule_work(&uas_work);
}
else
schedule_work(&devinfo->work);
}
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_abort_work(struct uas_dev_info *devinfo)
static void uas_mark_cmd_dead(struct uas_dev_info *devinfo,
struct uas_cmd_info *cmdinfo,
int result, const char *caller)
{
struct scsi_pointer *scp = (void *)cmdinfo;
struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd, SCp);
uas_log_cmd_state(cmnd, caller);
WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
WARN_ON_ONCE(cmdinfo->state & COMMAND_ABORTED);
cmdinfo->state |= COMMAND_ABORTED;
cmdinfo->state &= ~IS_IN_WORK_LIST;
cmnd->result = result << 16;
list_move_tail(&cmdinfo->list, &devinfo->dead_list);
}
static void uas_abort_inflight(struct uas_dev_info *devinfo, int result,
const char *caller)
{
struct uas_cmd_info *cmdinfo;
struct uas_cmd_info *temp;
struct list_head list;
unsigned long flags;
spin_lock_irq(&uas_work_lock);
list_replace_init(&uas_work_list, &list);
spin_unlock_irq(&uas_work_lock);
spin_lock_irqsave(&devinfo->lock, flags);
list_for_each_entry_safe(cmdinfo, temp, &devinfo->inflight_list, list)
uas_mark_cmd_dead(devinfo, cmdinfo, result, caller);
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_add_work(struct uas_cmd_info *cmdinfo)
{
struct scsi_pointer *scp = (void *)cmdinfo;
struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd, SCp);
struct uas_dev_info *devinfo = cmnd->device->hostdata;
WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
cmdinfo->state |= IS_IN_WORK_LIST;
schedule_work(&devinfo->work);
}
static void uas_zap_dead(struct uas_dev_info *devinfo)
{
struct uas_cmd_info *cmdinfo;
struct uas_cmd_info *temp;
unsigned long flags;
spin_lock_irqsave(&devinfo->lock, flags);
list_for_each_entry_safe(cmdinfo, temp, &list, list) {
list_for_each_entry_safe(cmdinfo, temp, &devinfo->dead_list, list) {
struct scsi_pointer *scp = (void *)cmdinfo;
struct scsi_cmnd *cmnd = container_of(scp,
struct scsi_cmnd, SCp);
struct uas_dev_info *di = (void *)cmnd->device->hostdata;
if (di == devinfo) {
cmdinfo->state |= COMMAND_ABORTED;
cmdinfo->state &= ~IS_IN_WORK_LIST;
if (devinfo->resetting) {
/* uas_stat_cmplt() will not do that
* when a device reset is in
* progress */
cmdinfo->state &= ~COMMAND_INFLIGHT;
}
uas_try_complete(cmnd, __func__);
} else {
/* not our uas device, relink into list */
list_del(&cmdinfo->list);
spin_lock_irq(&uas_work_lock);
list_add_tail(&cmdinfo->list, &uas_work_list);
spin_unlock_irq(&uas_work_lock);
}
struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd,
SCp);
uas_log_cmd_state(cmnd, __func__);
WARN_ON_ONCE(!(cmdinfo->state & COMMAND_ABORTED));
/* all urbs are killed, clear inflight bits */
cmdinfo->state &= ~(COMMAND_INFLIGHT |
DATA_IN_URB_INFLIGHT |
DATA_OUT_URB_INFLIGHT);
uas_try_complete(cmnd, __func__);
}
devinfo->running_task = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
}
......@@ -259,20 +283,19 @@ static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
WARN_ON(!spin_is_locked(&devinfo->lock));
WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
if (cmdinfo->state & (COMMAND_INFLIGHT |
DATA_IN_URB_INFLIGHT |
DATA_OUT_URB_INFLIGHT |
UNLINK_DATA_URBS))
return -EBUSY;
BUG_ON(cmdinfo->state & COMMAND_COMPLETED);
WARN_ON_ONCE(cmdinfo->state & COMMAND_COMPLETED);
cmdinfo->state |= COMMAND_COMPLETED;
usb_free_urb(cmdinfo->data_in_urb);
usb_free_urb(cmdinfo->data_out_urb);
if (cmdinfo->state & COMMAND_ABORTED) {
if (cmdinfo->state & COMMAND_ABORTED)
scmd_printk(KERN_INFO, cmnd, "abort completed\n");
cmnd->result = DID_ABORT << 16;
}
list_del(&cmdinfo->list);
cmnd->scsi_done(cmnd);
return 0;
}
......@@ -286,11 +309,7 @@ static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
cmdinfo->state |= direction | SUBMIT_STATUS_URB;
err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
if (err) {
spin_lock(&uas_work_lock);
list_add_tail(&cmdinfo->list, &uas_work_list);
cmdinfo->state |= IS_IN_WORK_LIST;
spin_unlock(&uas_work_lock);
schedule_work(&uas_work);
uas_add_work(cmdinfo);
}
}
......@@ -298,14 +317,20 @@ static void uas_stat_cmplt(struct urb *urb)
{
struct iu *iu = urb->transfer_buffer;
struct Scsi_Host *shost = urb->context;
struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
struct scsi_cmnd *cmnd;
struct uas_cmd_info *cmdinfo;
unsigned long flags;
u16 tag;
if (urb->status) {
dev_err(&urb->dev->dev, "URB BAD STATUS %d\n", urb->status);
if (urb->status == -ENOENT) {
dev_err(&urb->dev->dev, "stat urb: killed, stream %d\n",
urb->stream_id);
} else {
dev_err(&urb->dev->dev, "stat urb: status %d\n",
urb->status);
}
usb_free_urb(urb);
return;
}
......@@ -324,6 +349,9 @@ static void uas_stat_cmplt(struct urb *urb)
if (!cmnd) {
if (iu->iu_id == IU_ID_RESPONSE) {
if (!devinfo->running_task)
dev_warn(&urb->dev->dev,
"stat urb: recv unexpected response iu\n");
/* store results for uas_eh_task_mgmt() */
memcpy(&devinfo->response, iu, sizeof(devinfo->response));
}
......@@ -346,17 +374,25 @@ static void uas_stat_cmplt(struct urb *urb)
uas_sense(urb, cmnd);
if (cmnd->result != 0) {
/* cancel data transfers on error */
spin_unlock_irqrestore(&devinfo->lock, flags);
uas_unlink_data_urbs(devinfo, cmdinfo);
spin_lock_irqsave(&devinfo->lock, flags);
uas_unlink_data_urbs(devinfo, cmdinfo, &flags);
}
cmdinfo->state &= ~COMMAND_INFLIGHT;
uas_try_complete(cmnd, __func__);
break;
case IU_ID_READ_READY:
if (!cmdinfo->data_in_urb ||
(cmdinfo->state & DATA_IN_URB_INFLIGHT)) {
scmd_printk(KERN_ERR, cmnd, "unexpected read rdy\n");
break;
}
uas_xfer_data(urb, cmnd, SUBMIT_DATA_IN_URB);
break;
case IU_ID_WRITE_READY:
if (!cmdinfo->data_out_urb ||
(cmdinfo->state & DATA_OUT_URB_INFLIGHT)) {
scmd_printk(KERN_ERR, cmnd, "unexpected write rdy\n");
break;
}
uas_xfer_data(urb, cmnd, SUBMIT_DATA_OUT_URB);
break;
default:
......@@ -383,8 +419,15 @@ static void uas_data_cmplt(struct urb *urb)
sdb = scsi_out(cmnd);
cmdinfo->state &= ~DATA_OUT_URB_INFLIGHT;
}
BUG_ON(sdb == NULL);
if (urb->status) {
if (sdb == NULL) {
WARN_ON_ONCE(1);
} else if (urb->status) {
if (urb->status != -ECONNRESET) {
uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_ERR, cmnd,
"data cmplt err %d stream %d\n",
urb->status, urb->stream_id);
}
/* error: no data transferred */
sdb->resid = sdb->length;
} else {
......@@ -394,6 +437,17 @@ static void uas_data_cmplt(struct urb *urb)
spin_unlock_irqrestore(&devinfo->lock, flags);
}
static void uas_cmd_cmplt(struct urb *urb)
{
struct scsi_cmnd *cmnd = urb->context;
if (urb->status) {
uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_ERR, cmnd, "cmd cmplt err %d\n", urb->status);
}
usb_free_urb(urb);
}
static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
unsigned int pipe, u16 stream_id,
struct scsi_cmnd *cmnd,
......@@ -408,8 +462,7 @@ static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
goto out;
usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length,
uas_data_cmplt, cmnd);
if (devinfo->use_streams)
urb->stream_id = stream_id;
urb->stream_id = stream_id;
urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
urb->sg = sdb->table.sgl;
out:
......@@ -442,7 +495,7 @@ static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
}
static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
struct scsi_cmnd *cmnd, u16 stream_id)
struct scsi_cmnd *cmnd)
{
struct usb_device *udev = devinfo->udev;
struct scsi_device *sdev = cmnd->device;
......@@ -472,7 +525,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu) + len,
usb_free_urb, NULL);
uas_cmd_cmplt, cmnd);
urb->transfer_flags |= URB_FREE_BUFFER;
out:
return urb;
......@@ -512,13 +565,17 @@ static int uas_submit_task_urb(struct scsi_cmnd *cmnd, gfp_t gfp,
}
usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu),
usb_free_urb, NULL);
uas_cmd_cmplt, cmnd);
urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &devinfo->cmd_urbs);
err = usb_submit_urb(urb, gfp);
if (err)
if (err) {
usb_unanchor_urb(urb);
uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_ERR, cmnd, "task submission err %d\n", err);
goto err;
usb_anchor_urb(urb, &devinfo->cmd_urbs);
}
return 0;
......@@ -533,38 +590,43 @@ static int uas_submit_task_urb(struct scsi_cmnd *cmnd, gfp_t gfp,
* daft to me.
*/
static int uas_submit_sense_urb(struct Scsi_Host *shost,
gfp_t gfp, unsigned int stream)
static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd,
gfp_t gfp, unsigned int stream)
{
struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
struct Scsi_Host *shost = cmnd->device->host;
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
struct urb *urb;
int err;
urb = uas_alloc_sense_urb(devinfo, gfp, shost, stream);
if (!urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
if (usb_submit_urb(urb, gfp)) {
return NULL;
usb_anchor_urb(urb, &devinfo->sense_urbs);
err = usb_submit_urb(urb, gfp);
if (err) {
usb_unanchor_urb(urb);
uas_log_cmd_state(cmnd, __func__);
shost_printk(KERN_INFO, shost,
"sense urb submission failure\n");
"sense urb submission error %d stream %d\n",
err, stream);
usb_free_urb(urb);
return SCSI_MLQUEUE_DEVICE_BUSY;
return NULL;
}
usb_anchor_urb(urb, &devinfo->sense_urbs);
return 0;
return urb;
}
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo, gfp_t gfp)
{
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
struct urb *urb;
int err;
WARN_ON(!spin_is_locked(&devinfo->lock));
WARN_ON_ONCE(!spin_is_locked(&devinfo->lock));
if (cmdinfo->state & SUBMIT_STATUS_URB) {
err = uas_submit_sense_urb(cmnd->device->host, gfp,
cmdinfo->stream);
if (err) {
return err;
}
urb = uas_submit_sense_urb(cmnd, gfp, cmdinfo->stream);
if (!urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~SUBMIT_STATUS_URB;
}
......@@ -578,14 +640,18 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
}
if (cmdinfo->state & SUBMIT_DATA_IN_URB) {
if (usb_submit_urb(cmdinfo->data_in_urb, gfp)) {
usb_anchor_urb(cmdinfo->data_in_urb, &devinfo->data_urbs);
err = usb_submit_urb(cmdinfo->data_in_urb, gfp);
if (err) {
usb_unanchor_urb(cmdinfo->data_in_urb);
uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_INFO, cmnd,
"data in urb submission failure\n");
"data in urb submission error %d stream %d\n",
err, cmdinfo->data_in_urb->stream_id);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
cmdinfo->state |= DATA_IN_URB_INFLIGHT;
usb_anchor_urb(cmdinfo->data_in_urb, &devinfo->data_urbs);
}
if (cmdinfo->state & ALLOC_DATA_OUT_URB) {
......@@ -598,33 +664,37 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
}
if (cmdinfo->state & SUBMIT_DATA_OUT_URB) {
if (usb_submit_urb(cmdinfo->data_out_urb, gfp)) {
usb_anchor_urb(cmdinfo->data_out_urb, &devinfo->data_urbs);
err = usb_submit_urb(cmdinfo->data_out_urb, gfp);
if (err) {
usb_unanchor_urb(cmdinfo->data_out_urb);
uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_INFO, cmnd,
"data out urb submission failure\n");
"data out urb submission error %d stream %d\n",
err, cmdinfo->data_out_urb->stream_id);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
cmdinfo->state |= DATA_OUT_URB_INFLIGHT;
usb_anchor_urb(cmdinfo->data_out_urb, &devinfo->data_urbs);
}
if (cmdinfo->state & ALLOC_CMD_URB) {
cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd,
cmdinfo->stream);
cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd);
if (!cmdinfo->cmd_urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~ALLOC_CMD_URB;
}
if (cmdinfo->state & SUBMIT_CMD_URB) {
usb_get_urb(cmdinfo->cmd_urb);
if (usb_submit_urb(cmdinfo->cmd_urb, gfp)) {
usb_anchor_urb(cmdinfo->cmd_urb, &devinfo->cmd_urbs);
err = usb_submit_urb(cmdinfo->cmd_urb, gfp);
if (err) {
usb_unanchor_urb(cmdinfo->cmd_urb);
uas_log_cmd_state(cmnd, __func__);
scmd_printk(KERN_INFO, cmnd,
"cmd urb submission failure\n");
"cmd urb submission error %d\n", err);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
usb_anchor_urb(cmdinfo->cmd_urb, &devinfo->cmd_urbs);
usb_put_urb(cmdinfo->cmd_urb);
cmdinfo->cmd_urb = NULL;
cmdinfo->state &= ~SUBMIT_CMD_URB;
cmdinfo->state |= COMMAND_INFLIGHT;
......@@ -644,18 +714,22 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->resetting) {
cmnd->result = DID_ERROR << 16;
cmnd->scsi_done(cmnd);
spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->cmnd) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
memset(cmdinfo, 0, sizeof(*cmdinfo));
if (blk_rq_tagged(cmnd->request)) {
cmdinfo->stream = cmnd->request->tag + 2;
} else {
......@@ -692,13 +766,10 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
spin_lock(&uas_work_lock);
list_add_tail(&cmdinfo->list, &uas_work_list);
cmdinfo->state |= IS_IN_WORK_LIST;
spin_unlock(&uas_work_lock);
schedule_work(&uas_work);
uas_add_work(cmdinfo);
}
list_add_tail(&cmdinfo->list, &devinfo->inflight_list);
spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
......@@ -709,46 +780,78 @@ static int uas_eh_task_mgmt(struct scsi_cmnd *cmnd,
const char *fname, u8 function)
{
struct Scsi_Host *shost = cmnd->device->host;
struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
u16 tag = devinfo->qdepth - 1;
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
u16 tag = devinfo->qdepth;
unsigned long flags;
struct urb *sense_urb;
int result = SUCCESS;
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->resetting) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return FAILED;
}
if (devinfo->running_task) {
shost_printk(KERN_INFO, shost,
"%s: %s: error already running a task\n",
__func__, fname);
spin_unlock_irqrestore(&devinfo->lock, flags);
return FAILED;
}
devinfo->running_task = 1;
memset(&devinfo->response, 0, sizeof(devinfo->response));
if (uas_submit_sense_urb(shost, GFP_ATOMIC, tag)) {
sense_urb = uas_submit_sense_urb(cmnd, GFP_NOIO,
devinfo->use_streams ? tag : 0);
if (!sense_urb) {
shost_printk(KERN_INFO, shost,
"%s: %s: submit sense urb failed\n",
__func__, fname);
devinfo->running_task = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
return FAILED;
}
if (uas_submit_task_urb(cmnd, GFP_ATOMIC, function, tag)) {
if (uas_submit_task_urb(cmnd, GFP_NOIO, function, tag)) {
shost_printk(KERN_INFO, shost,
"%s: %s: submit task mgmt urb failed\n",
__func__, fname);
devinfo->running_task = 0;
spin_unlock_irqrestore(&devinfo->lock, flags);
usb_kill_urb(sense_urb);
return FAILED;
}
spin_unlock_irqrestore(&devinfo->lock, flags);
if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 3000) == 0) {
/*
* Note we deliberately do not clear running_task here. If we
* allow new tasks to be submitted, there is no way to figure
* out if a received response_iu is for the failed task or for
* the new one. A bus-reset will eventually clear running_task.
*/
shost_printk(KERN_INFO, shost,
"%s: %s timed out\n", __func__, fname);
return FAILED;
}
spin_lock_irqsave(&devinfo->lock, flags);
devinfo->running_task = 0;
if (be16_to_cpu(devinfo->response.tag) != tag) {
shost_printk(KERN_INFO, shost,
"%s: %s failed (wrong tag %d/%d)\n", __func__,
fname, be16_to_cpu(devinfo->response.tag), tag);
return FAILED;
}
if (devinfo->response.response_code != RC_TMF_COMPLETE) {
result = FAILED;
} else if (devinfo->response.response_code != RC_TMF_COMPLETE) {
shost_printk(KERN_INFO, shost,
"%s: %s failed (rc 0x%x)\n", __func__,
fname, devinfo->response.response_code);
return FAILED;
result = FAILED;
}
return SUCCESS;
spin_unlock_irqrestore(&devinfo->lock, flags);
return result;
}
static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
......@@ -758,22 +861,19 @@ static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
unsigned long flags;
int ret;
uas_log_cmd_state(cmnd, __func__);
spin_lock_irqsave(&devinfo->lock, flags);
cmdinfo->state |= COMMAND_ABORTED;
if (cmdinfo->state & IS_IN_WORK_LIST) {
spin_lock(&uas_work_lock);
list_del(&cmdinfo->list);
cmdinfo->state &= ~IS_IN_WORK_LIST;
spin_unlock(&uas_work_lock);
if (devinfo->resetting) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return FAILED;
}
uas_mark_cmd_dead(devinfo, cmdinfo, DID_ABORT, __func__);
if (cmdinfo->state & COMMAND_INFLIGHT) {
spin_unlock_irqrestore(&devinfo->lock, flags);
ret = uas_eh_task_mgmt(cmnd, "ABORT TASK", TMF_ABORT_TASK);
} else {
spin_unlock_irqrestore(&devinfo->lock, flags);
uas_unlink_data_urbs(devinfo, cmdinfo);
spin_lock_irqsave(&devinfo->lock, flags);
uas_unlink_data_urbs(devinfo, cmdinfo, &flags);
uas_try_complete(cmnd, __func__);
spin_unlock_irqrestore(&devinfo->lock, flags);
ret = SUCCESS;
......@@ -795,14 +895,25 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
struct usb_device *udev = devinfo->udev;
int err;
err = usb_lock_device_for_reset(udev, devinfo->intf);
if (err) {
shost_printk(KERN_ERR, sdev->host,
"%s FAILED to get lock err %d\n", __func__, err);
return FAILED;
}
shost_printk(KERN_INFO, sdev->host, "%s start\n", __func__);
devinfo->resetting = 1;
uas_abort_work(devinfo);
uas_abort_inflight(devinfo, DID_RESET, __func__);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
uas_zap_dead(devinfo);
err = usb_reset_device(udev);
devinfo->resetting = 0;
usb_unlock_device(udev);
if (err) {
shost_printk(KERN_INFO, sdev->host, "%s FAILED\n", __func__);
return FAILED;
......@@ -814,7 +925,25 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
static int uas_slave_alloc(struct scsi_device *sdev)
{
sdev->hostdata = (void *)sdev->host->hostdata[0];
sdev->hostdata = (void *)sdev->host->hostdata;
/* USB has unusual DMA-alignment requirements: Although the
* starting address of each scatter-gather element doesn't matter,
* the length of each element except the last must be divisible
* by the Bulk maxpacket value. There's currently no way to
* express this by block-layer constraints, so we'll cop out
* and simply require addresses to be aligned at 512-byte
* boundaries. This is okay since most block I/O involves
* hardware sectors that are multiples of 512 bytes in length,
* and since host controllers up through USB 2.0 have maxpacket
* values no larger than 512.
*
* But it doesn't suffice for Wireless USB, where Bulk maxpacket
* values can be as large as 2048. To make that work properly
* will require changes to the block layer.
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
return 0;
}
......@@ -822,7 +951,7 @@ static int uas_slave_configure(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo = sdev->hostdata;
scsi_set_tag_type(sdev, MSG_ORDERED_TAG);
scsi_activate_tcq(sdev, devinfo->qdepth - 3);
scsi_activate_tcq(sdev, devinfo->qdepth - 2);
return 0;
}
......@@ -843,7 +972,14 @@ static struct scsi_host_template uas_host_template = {
.ordered_tag = 1,
};
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
static struct usb_device_id uas_usb_ids[] = {
# include "unusual_uas.h"
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_UAS) },
/* 0xaa is a prototype device I happen to have access to */
......@@ -852,105 +988,55 @@ static struct usb_device_id uas_usb_ids[] = {
};
MODULE_DEVICE_TABLE(usb, uas_usb_ids);
static int uas_is_interface(struct usb_host_interface *intf)
{
return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE &&
intf->desc.bInterfaceSubClass == USB_SC_SCSI &&
intf->desc.bInterfaceProtocol == USB_PR_UAS);
}
static int uas_isnt_supported(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
dev_warn(&udev->dev, "The driver for the USB controller %s does not "
"support scatter-gather which is\n",
hcd->driver->description);
dev_warn(&udev->dev, "required by the UAS driver. Please try an"
"alternative USB controller if you wish to use UAS.\n");
return -ENODEV;
}
#undef UNUSUAL_DEV
static int uas_switch_interface(struct usb_device *udev,
struct usb_interface *intf)
struct usb_interface *intf)
{
int i;
int sg_supported = udev->bus->sg_tablesize != 0;
for (i = 0; i < intf->num_altsetting; i++) {
struct usb_host_interface *alt = &intf->altsetting[i];
if (uas_is_interface(alt)) {
if (!sg_supported)
return uas_isnt_supported(udev);
return usb_set_interface(udev,
alt->desc.bInterfaceNumber,
alt->desc.bAlternateSetting);
}
}
int alt;
alt = uas_find_uas_alt_setting(intf);
if (alt < 0)
return alt;
return -ENODEV;
return usb_set_interface(udev,
intf->altsetting[0].desc.bInterfaceNumber, alt);
}
static void uas_configure_endpoints(struct uas_dev_info *devinfo)
static int uas_configure_endpoints(struct uas_dev_info *devinfo)
{
struct usb_host_endpoint *eps[4] = { };
struct usb_interface *intf = devinfo->intf;
struct usb_device *udev = devinfo->udev;
struct usb_host_endpoint *endpoint = intf->cur_altsetting->endpoint;
unsigned i, n_endpoints = intf->cur_altsetting->desc.bNumEndpoints;
int r;
devinfo->uas_sense_old = 0;
devinfo->cmnd = NULL;
for (i = 0; i < n_endpoints; i++) {
unsigned char *extra = endpoint[i].extra;
int len = endpoint[i].extralen;
while (len > 1) {
if (extra[1] == USB_DT_PIPE_USAGE) {
unsigned pipe_id = extra[2];
if (pipe_id > 0 && pipe_id < 5)
eps[pipe_id - 1] = &endpoint[i];
break;
}
len -= extra[0];
extra += extra[0];
}
}
r = uas_find_endpoints(devinfo->intf->cur_altsetting, eps);
if (r)
return r;
/*
* Assume that if we didn't find a control pipe descriptor, we're
* using a device with old firmware that happens to be set up like
* this.
*/
if (!eps[0]) {
devinfo->cmd_pipe = usb_sndbulkpipe(udev, 1);
devinfo->status_pipe = usb_rcvbulkpipe(udev, 1);
devinfo->data_in_pipe = usb_rcvbulkpipe(udev, 2);
devinfo->data_out_pipe = usb_sndbulkpipe(udev, 2);
eps[1] = usb_pipe_endpoint(udev, devinfo->status_pipe);
eps[2] = usb_pipe_endpoint(udev, devinfo->data_in_pipe);
eps[3] = usb_pipe_endpoint(udev, devinfo->data_out_pipe);
} else {
devinfo->cmd_pipe = usb_sndbulkpipe(udev,
eps[0]->desc.bEndpointAddress);
devinfo->status_pipe = usb_rcvbulkpipe(udev,
eps[1]->desc.bEndpointAddress);
devinfo->data_in_pipe = usb_rcvbulkpipe(udev,
eps[2]->desc.bEndpointAddress);
devinfo->data_out_pipe = usb_sndbulkpipe(udev,
eps[3]->desc.bEndpointAddress);
}
devinfo->cmd_pipe = usb_sndbulkpipe(udev,
usb_endpoint_num(&eps[0]->desc));
devinfo->status_pipe = usb_rcvbulkpipe(udev,
usb_endpoint_num(&eps[1]->desc));
devinfo->data_in_pipe = usb_rcvbulkpipe(udev,
usb_endpoint_num(&eps[2]->desc));
devinfo->data_out_pipe = usb_sndbulkpipe(udev,
usb_endpoint_num(&eps[3]->desc));
devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1, 3, 256,
GFP_KERNEL);
if (devinfo->qdepth < 0) {
if (udev->speed != USB_SPEED_SUPER) {
devinfo->qdepth = 256;
devinfo->use_streams = 0;
} else {
devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1,
3, 256, GFP_KERNEL);
if (devinfo->qdepth < 0)
return devinfo->qdepth;
devinfo->use_streams = 1;
}
return 0;
}
static void uas_free_streams(struct uas_dev_info *devinfo)
......@@ -964,30 +1050,23 @@ static void uas_free_streams(struct uas_dev_info *devinfo)
usb_free_streams(devinfo->intf, eps, 3, GFP_KERNEL);
}
/*
* XXX: What I'd like to do here is register a SCSI host for each USB host in
* the system. Follow usb-storage's design of registering a SCSI host for
* each USB device for the moment. Can implement this by walking up the
* USB hierarchy until we find a USB host.
*/
static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
int result;
struct Scsi_Host *shost;
int result = -ENOMEM;
struct Scsi_Host *shost = NULL;
struct uas_dev_info *devinfo;
struct usb_device *udev = interface_to_usbdev(intf);
if (uas_switch_interface(udev, intf))
if (!uas_use_uas_driver(intf, id))
return -ENODEV;
devinfo = kmalloc(sizeof(struct uas_dev_info), GFP_KERNEL);
if (!devinfo)
return -ENOMEM;
if (uas_switch_interface(udev, intf))
return -ENODEV;
result = -ENOMEM;
shost = scsi_host_alloc(&uas_host_template, sizeof(void *));
shost = scsi_host_alloc(&uas_host_template,
sizeof(struct uas_dev_info));
if (!shost)
goto free;
goto set_alt0;
shost->max_cmd_len = 16 + 252;
shost->max_id = 1;
......@@ -995,33 +1074,40 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
shost->max_channel = 0;
shost->sg_tablesize = udev->bus->sg_tablesize;
devinfo = (struct uas_dev_info *)shost->hostdata;
devinfo->intf = intf;
devinfo->udev = udev;
devinfo->resetting = 0;
devinfo->running_task = 0;
devinfo->shutdown = 0;
init_usb_anchor(&devinfo->cmd_urbs);
init_usb_anchor(&devinfo->sense_urbs);
init_usb_anchor(&devinfo->data_urbs);
spin_lock_init(&devinfo->lock);
uas_configure_endpoints(devinfo);
INIT_WORK(&devinfo->work, uas_do_work);
INIT_LIST_HEAD(&devinfo->inflight_list);
INIT_LIST_HEAD(&devinfo->dead_list);
result = scsi_init_shared_tag_map(shost, devinfo->qdepth - 3);
result = uas_configure_endpoints(devinfo);
if (result)
goto free;
goto set_alt0;
result = scsi_add_host(shost, &intf->dev);
result = scsi_init_shared_tag_map(shost, devinfo->qdepth - 2);
if (result)
goto deconfig_eps;
goto free_streams;
shost->hostdata[0] = (unsigned long)devinfo;
result = scsi_add_host(shost, &intf->dev);
if (result)
goto free_streams;
scsi_scan_host(shost);
usb_set_intfdata(intf, shost);
return result;
deconfig_eps:
free_streams:
uas_free_streams(devinfo);
free:
kfree(devinfo);
set_alt0:
usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
if (shost)
scsi_host_put(shost);
return result;
......@@ -1029,45 +1115,146 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
static int uas_pre_reset(struct usb_interface *intf)
{
/* XXX: Need to return 1 if it's not our device in error handling */
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
unsigned long flags;
if (devinfo->shutdown)
return 0;
/* Block new requests */
spin_lock_irqsave(shost->host_lock, flags);
scsi_block_requests(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
/* Wait for any pending requests to complete */
flush_work(&devinfo->work);
if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 5000) == 0) {
shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__);
return 1;
}
uas_free_streams(devinfo);
return 0;
}
static int uas_post_reset(struct usb_interface *intf)
{
/* XXX: Need to return 1 if it's not our device in error handling */
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
unsigned long flags;
if (devinfo->shutdown)
return 0;
if (uas_configure_endpoints(devinfo) != 0) {
shost_printk(KERN_ERR, shost,
"%s: alloc streams error after reset", __func__);
return 1;
}
spin_lock_irqsave(shost->host_lock, flags);
scsi_report_bus_reset(shost, 0);
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_unblock_requests(shost);
return 0;
}
static int uas_suspend(struct usb_interface *intf, pm_message_t message)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
/* Wait for any pending requests to complete */
flush_work(&devinfo->work);
if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 5000) == 0) {
shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__);
return -ETIME;
}
return 0;
}
static int uas_resume(struct usb_interface *intf)
{
return 0;
}
static int uas_reset_resume(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
unsigned long flags;
if (uas_configure_endpoints(devinfo) != 0) {
shost_printk(KERN_ERR, shost,
"%s: alloc streams error after reset", __func__);
return -EIO;
}
spin_lock_irqsave(shost->host_lock, flags);
scsi_report_bus_reset(shost, 0);
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
static void uas_disconnect(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
devinfo->resetting = 1;
uas_abort_work(devinfo);
cancel_work_sync(&devinfo->work);
uas_abort_inflight(devinfo, DID_NO_CONNECT, __func__);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
uas_zap_dead(devinfo);
scsi_remove_host(shost);
uas_free_streams(devinfo);
kfree(devinfo);
scsi_host_put(shost);
}
/*
* XXX: Should this plug into libusual so we can auto-upgrade devices from
* Bulk-Only to UAS?
* Put the device back in usb-storage mode on shutdown, as some BIOS-es
* hang on reboot when the device is still in uas mode. Note the reset is
* necessary as some devices won't revert to usb-storage mode without it.
*/
static void uas_shutdown(struct device *dev)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *udev = interface_to_usbdev(intf);
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
if (system_state != SYSTEM_RESTART)
return;
devinfo->shutdown = 1;
uas_free_streams(devinfo);
usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
usb_reset_device(udev);
}
static struct usb_driver uas_driver = {
.name = "uas",
.probe = uas_probe,
.disconnect = uas_disconnect,
.pre_reset = uas_pre_reset,
.post_reset = uas_post_reset,
.suspend = uas_suspend,
.resume = uas_resume,
.reset_resume = uas_reset_resume,
.drvwrap.driver.shutdown = uas_shutdown,
.id_table = uas_usb_ids,
};
module_usb_driver(uas_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Matthew Wilcox and Sarah Sharp");
MODULE_AUTHOR(
"Hans de Goede <hdegoede@redhat.com>, Matthew Wilcox and Sarah Sharp");
......@@ -2086,6 +2086,11 @@ UNUSUAL_DEV( 0xed10, 0x7636, 0x0001, 0x0001,
"Digital MP3 Audio Player",
USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
/* Unusual uas devices */
#if IS_ENABLED(CONFIG_USB_UAS)
#include "unusual_uas.h"
#endif
/* Control/Bulk transport for all SubClass values */
USUAL_DEV(USB_SC_RBC, USB_PR_CB),
USUAL_DEV(USB_SC_8020, USB_PR_CB),
......
/* Driver for USB Attached SCSI devices - Unusual Devices File
*
* (c) 2013 Hans de Goede <hdegoede@redhat.com>
*
* Based on the same file for the usb-storage driver, which is:
* (c) 2000-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
* (c) 2000 Adam J. Richter (adam@yggdrasil.com), Yggdrasil Computing, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* IMPORTANT NOTE: This file must be included in another file which defines
* a UNUSUAL_DEV macro before this file is included.
*/
/*
* If you edit this file, please try to keep it sorted first by VendorID,
* then by ProductID.
*
* If you want to add an entry for this file, be sure to include the
* following information:
* - a patch that adds the entry for your device, including your
* email address right above the entry (plus maybe a brief
* explanation of the reason for the entry),
* - lsusb -v output for the device
* Send your submission to Hans de Goede <hdegoede@redhat.com>
* and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
*/
/*
* This is an example entry for the US_FL_IGNORE_UAS flag. Once we have an
* actual entry using US_FL_IGNORE_UAS this entry should be removed.
*
* UNUSUAL_DEV( 0xabcd, 0x1234, 0x0100, 0x0100,
* "Example",
* "Storage with broken UAS",
* USB_SC_DEVICE, USB_PR_DEVICE, NULL,
* US_FL_IGNORE_UAS),
*/
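For reference, uas.c defines its own UNUSUAL_DEV() wrapper (shown earlier in this diff) before including this file, so an entry like the example above expands to a plain usb_device_id match whose driver_info carries only the flags. A sketch of that expansion, using the placeholder VID/PID from the example rather than a real device:

        /* Hypothetical expansion of the example entry via uas.c's UNUSUAL_DEV() wrapper;
         * 0xabcd/0x1234 are the placeholder IDs from the comment above, not a real device. */
        { USB_DEVICE_VER(0xabcd, 0x1234, 0x0100, 0x0100),
          .driver_info = US_FL_IGNORE_UAS },

usb-storage includes the same file under its own, richer UNUSUAL_DEV() definition, which is why each entry also carries vendor/product strings and transport fields that the uas.c wrapper simply discards.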
......@@ -72,6 +72,10 @@
#include "sierra_ms.h"
#include "option_ms.h"
#if IS_ENABLED(CONFIG_USB_UAS)
#include "uas-detect.h"
#endif
/* Some informational data */
MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
MODULE_DESCRIPTION("USB Mass Storage driver for Linux");
......@@ -459,14 +463,14 @@ static int associate_dev(struct us_data *us, struct usb_interface *intf)
#define TOLOWER(x) ((x) | 0x20)
/* Adjust device flags based on the "quirks=" module parameter */
static void adjust_quirks(struct us_data *us)
void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
{
char *p;
u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
u16 vid = le16_to_cpu(udev->descriptor.idVendor);
u16 pid = le16_to_cpu(udev->descriptor.idProduct);
unsigned f = 0;
unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
US_FL_FIX_CAPACITY |
US_FL_FIX_CAPACITY | US_FL_IGNORE_UAS |
US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
......@@ -537,14 +541,18 @@ static void adjust_quirks(struct us_data *us)
case 's':
f |= US_FL_SINGLE_LUN;
break;
case 'u':
f |= US_FL_IGNORE_UAS;
break;
case 'w':
f |= US_FL_NO_WP_DETECT;
break;
/* Ignore unrecognized flag characters */
}
}
us->fflags = (us->fflags & ~mask) | f;
*fflags = (*fflags & ~mask) | f;
}
EXPORT_SYMBOL_GPL(usb_stor_adjust_quirks);
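With usb_stor_adjust_quirks() exported and the new 'u' flag character wired up above, US_FL_IGNORE_UAS can also be requested at runtime through the existing quirks= module parameter rather than only via an unusual_uas.h entry. A hypothetical invocation (the VID:PID is a placeholder, and the parameter is assumed to keep its usual vid:pid:flags form):

        usb-storage.quirks=abcd:1234:u

The intent is that a device flagged this way is skipped by the UAS probe path and stays on the Bulk-only transport handled by usb-storage.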
/* Get the unusual_devs entries and the string descriptors */
static int get_device_info(struct us_data *us, const struct usb_device_id *id,
......@@ -564,7 +572,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
idesc->bInterfaceProtocol :
unusual_dev->useTransport;
us->fflags = id->driver_info;
adjust_quirks(us);
usb_stor_adjust_quirks(us->pusb_dev, &us->fflags);
if (us->fflags & US_FL_IGNORE_DEVICE) {
dev_info(pdev, "device ignored\n");
......@@ -1035,6 +1043,12 @@ static int storage_probe(struct usb_interface *intf,
int result;
int size;
/* If uas is enabled and this device can do uas then ignore it. */
#if IS_ENABLED(CONFIG_USB_UAS)
if (uas_use_uas_driver(intf, id))
return -ENXIO;
#endif
/*
* If the device isn't standard (is handled by a subdriver
* module) then don't accept it.
......
......@@ -201,4 +201,7 @@ extern int usb_stor_probe1(struct us_data **pus,
extern int usb_stor_probe2(struct us_data *us);
extern void usb_stor_disconnect(struct usb_interface *intf);
extern void usb_stor_adjust_quirks(struct usb_device *dev,
unsigned long *fflags);
#endif
......@@ -57,6 +57,7 @@ struct ep_device;
* @extra: descriptors following this endpoint in the configuration
* @extralen: how many bytes of "extra" are valid
* @enabled: URBs may be submitted to this endpoint
* @streams: number of USB-3 streams allocated on the endpoint
*
* USB requests are always queued to a given endpoint, identified by a
* descriptor within an active interface in a given USB configuration.
......@@ -71,6 +72,7 @@ struct usb_host_endpoint {
unsigned char *extra; /* Extra descriptors */
int extralen;
int enabled;
int streams;
};
/* host-side wrapper for one interface setting's parsed descriptors */
......@@ -202,6 +204,8 @@ static inline void usb_set_intfdata(struct usb_interface *intf, void *data)
struct usb_interface *usb_get_intf(struct usb_interface *intf);
void usb_put_intf(struct usb_interface *intf);
/* Hard limit */
#define USB_MAXENDPOINTS 30
/* this maximum is arbitrary */
#define USB_MAXINTERFACES 32
#define USB_MAXIADS (USB_MAXINTERFACES/2)
......
......@@ -143,6 +143,7 @@ struct usb_hcd {
unsigned authorized_default:1;
unsigned has_tt:1; /* Integrated TT in root hub */
unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
unsigned can_do_streams:1; /* HC supports streams */
unsigned int irq; /* irq allocated */
void __iomem *regs; /* device memory/io */
......
......@@ -9,7 +9,7 @@ struct iu {
__u8 iu_id;
__u8 rsvd1;
__be16 tag;
};
} __attribute__((__packed__));
enum {
IU_ID_COMMAND = 0x01,
......@@ -52,7 +52,7 @@ struct command_iu {
__u8 rsvd7;
struct scsi_lun lun;
__u8 cdb[16]; /* XXX: Overflow-checking tools may misunderstand */
};
} __attribute__((__packed__));
struct task_mgmt_iu {
__u8 iu_id;
......@@ -62,7 +62,7 @@ struct task_mgmt_iu {
__u8 rsvd2;
__be16 task_tag;
struct scsi_lun lun;
};
} __attribute__((__packed__));
/*
* Also used for the Read Ready and Write Ready IUs since they have the
......@@ -77,15 +77,15 @@ struct sense_iu {
__u8 rsvd7[7];
__be16 len;
__u8 sense[SCSI_SENSE_BUFFERSIZE];
};
} __attribute__((__packed__));
struct response_ui {
struct response_iu {
__u8 iu_id;
__u8 rsvd1;
__be16 tag;
__be16 add_response_info;
__u8 add_response_info[3];
__u8 response_code;
};
} __attribute__((__packed__));
struct usb_pipe_usage_descriptor {
__u8 bLength;
......
......@@ -67,8 +67,10 @@
/* Initial READ(10) (and others) must be retried */ \
US_FLAG(WRITE_CACHE, 0x00200000) \
/* Write Cache status is not available */ \
US_FLAG(NEEDS_CAP16, 0x00400000)
/* cannot handle READ_CAPACITY_10 */
US_FLAG(NEEDS_CAP16, 0x00400000) \
/* cannot handle READ_CAPACITY_10 */ \
US_FLAG(IGNORE_UAS, 0x00800000) \
/* Device advertises UAS but it is broken */
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
......
......@@ -102,7 +102,10 @@ struct usbdevfs_urb {
int buffer_length;
int actual_length;
int start_frame;
int number_of_packets;
union {
int number_of_packets; /* Only used for isoc urbs */
unsigned int stream_id; /* Only used with bulk streams */
};
int error_count;
unsigned int signr; /* signal to be sent on completion,
or 0 if none should be sent. */
......@@ -144,6 +147,11 @@ struct usbdevfs_disconnect_claim {
char driver[USBDEVFS_MAXDRIVERNAME + 1];
};
struct usbdevfs_streams {
unsigned int num_streams; /* Not used by USBDEVFS_FREE_STREAMS */
unsigned int num_eps;
unsigned char eps[0];
};
#define USBDEVFS_CONTROL _IOWR('U', 0, struct usbdevfs_ctrltransfer)
#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32)
......@@ -176,5 +184,7 @@ struct usbdevfs_disconnect_claim {
#define USBDEVFS_RELEASE_PORT _IOR('U', 25, unsigned int)
#define USBDEVFS_GET_CAPABILITIES _IOR('U', 26, __u32)
#define USBDEVFS_DISCONNECT_CLAIM _IOR('U', 27, struct usbdevfs_disconnect_claim)
#define USBDEVFS_ALLOC_STREAMS _IOR('U', 28, struct usbdevfs_streams)
#define USBDEVFS_FREE_STREAMS _IOR('U', 29, struct usbdevfs_streams)
#endif /* _UAPI_LINUX_USBDEVICE_FS_H */
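
To tie the new pieces together, here is a minimal userspace sketch of the bulk-streams API added above: allocate streams on a pair of endpoints with USBDEVFS_ALLOC_STREAMS, aim a bulk URB at one of them through the new stream_id member, and release the streams again. The device node path and the 0x81/0x02 endpoint addresses are placeholders, the interface is assumed to already be claimed, and this only illustrates the ioctl layout, not a complete transfer loop.

        #include <fcntl.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        #include <linux/usbdevice_fs.h>

        int main(void)
        {
                /* Placeholder device node; a real program would discover this. */
                int fd = open("/dev/bus/usb/001/002", O_RDWR);
                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                /* Header plus two endpoint bytes: 0x81 (bulk IN) and 0x02 (bulk OUT),
                 * both assumed to exist on the claimed interface. */
                struct usbdevfs_streams *streams = malloc(sizeof(*streams) + 2);
                if (!streams)
                        return 1;
                streams->num_streams = 16;      /* request 16 stream IDs */
                streams->num_eps = 2;
                streams->eps[0] = 0x81;
                streams->eps[1] = 0x02;

                if (ioctl(fd, USBDEVFS_ALLOC_STREAMS, streams) < 0)
                        perror("USBDEVFS_ALLOC_STREAMS");

                /* A bulk URB is directed at a stream via the new stream_id member,
                 * which shares storage with number_of_packets. */
                struct usbdevfs_urb urb;
                memset(&urb, 0, sizeof(urb));
                urb.type = USBDEVFS_URB_TYPE_BULK;
                urb.endpoint = 0x81;
                urb.stream_id = 1;
                /* urb.buffer and urb.buffer_length would be filled in before
                 * ioctl(fd, USBDEVFS_SUBMITURB, &urb) in a real transfer. */

                /* num_streams is not used when freeing; only the endpoint list matters. */
                ioctl(fd, USBDEVFS_FREE_STREAMS, streams);
                free(streams);
                close(fd);
                return 0;
        }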