Commit 670062e7 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'usb-6.4-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb

Pull USB / Thunderbolt fixes from Greg KH:
 "Here are some small USB and Thunderbolt driver fixes and new device
  ids for 6.4-rc7 to resolve some reported problems. Included in here
  are:

   - new USB serial device ids

   - USB gadget core fixes for long-discussed problems

   - dwc3 bugfixes for reported issues.

   - typec driver fixes

   - thunderbolt driver fixes

  All of these have been in linux-next this week with no reported issues"

* tag 'usb-6.4-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb:
  usb: gadget: udc: core: Prevent soft_connect_store() race
  usb: gadget: udc: core: Offload usb_udc_vbus_handler processing
  usb: typec: Fix fast_role_swap_current show function
  usb: typec: ucsi: Fix command cancellation
  USB: dwc3: fix use-after-free on core driver unbind
  USB: dwc3: qcom: fix NULL-deref on suspend
  usb: dwc3: gadget: Reset num TRBs before giving back the request
  usb: gadget: udc: renesas_usb3: Fix RZ/V2M {modprobe,bind} error
  USB: serial: option: add Quectel EM061KGL series
  thunderbolt: Mask ring interrupt on Intel hardware as well
  thunderbolt: Do not touch CL state configuration during discovery
  thunderbolt: Increase DisplayPort Connection Manager handshake timeout
  thunderbolt: dma_test: Use correct value for absent rings when creating paths
parents 3c0eb442 286d9975
...@@ -192,9 +192,9 @@ static int dma_test_start_rings(struct dma_test *dt) ...@@ -192,9 +192,9 @@ static int dma_test_start_rings(struct dma_test *dt)
} }
ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid, ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
dt->tx_ring ? dt->tx_ring->hop : 0, dt->tx_ring ? dt->tx_ring->hop : -1,
dt->rx_hopid, dt->rx_hopid,
dt->rx_ring ? dt->rx_ring->hop : 0); dt->rx_ring ? dt->rx_ring->hop : -1);
if (ret) { if (ret) {
dma_test_free_rings(dt); dma_test_free_rings(dt);
return ret; return ret;
...@@ -218,9 +218,9 @@ static void dma_test_stop_rings(struct dma_test *dt) ...@@ -218,9 +218,9 @@ static void dma_test_stop_rings(struct dma_test *dt)
tb_ring_stop(dt->tx_ring); tb_ring_stop(dt->tx_ring);
ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid, ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
dt->tx_ring ? dt->tx_ring->hop : 0, dt->tx_ring ? dt->tx_ring->hop : -1,
dt->rx_hopid, dt->rx_hopid,
dt->rx_ring ? dt->rx_ring->hop : 0); dt->rx_ring ? dt->rx_ring->hop : -1);
if (ret) if (ret)
dev_warn(&dt->svc->dev, "failed to disable DMA paths\n"); dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");
......
...@@ -56,9 +56,14 @@ static int ring_interrupt_index(const struct tb_ring *ring) ...@@ -56,9 +56,14 @@ static int ring_interrupt_index(const struct tb_ring *ring)
static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring) static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
{ {
if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
return; u32 val;
iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
} else {
iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
}
} }
static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring) static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
......
...@@ -737,6 +737,7 @@ static void tb_scan_port(struct tb_port *port) ...@@ -737,6 +737,7 @@ static void tb_scan_port(struct tb_port *port)
{ {
struct tb_cm *tcm = tb_priv(port->sw->tb); struct tb_cm *tcm = tb_priv(port->sw->tb);
struct tb_port *upstream_port; struct tb_port *upstream_port;
bool discovery = false;
struct tb_switch *sw; struct tb_switch *sw;
int ret; int ret;
...@@ -804,8 +805,10 @@ static void tb_scan_port(struct tb_port *port) ...@@ -804,8 +805,10 @@ static void tb_scan_port(struct tb_port *port)
* tunnels and know which switches were authorized already by * tunnels and know which switches were authorized already by
* the boot firmware. * the boot firmware.
*/ */
if (!tcm->hotplug_active) if (!tcm->hotplug_active) {
dev_set_uevent_suppress(&sw->dev, true); dev_set_uevent_suppress(&sw->dev, true);
discovery = true;
}
/* /*
* At the moment Thunderbolt 2 and beyond (devices with LC) we * At the moment Thunderbolt 2 and beyond (devices with LC) we
...@@ -835,10 +838,14 @@ static void tb_scan_port(struct tb_port *port) ...@@ -835,10 +838,14 @@ static void tb_scan_port(struct tb_port *port)
* CL0s and CL1 are enabled and supported together. * CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported. * Silently ignore CLx enabling in case CLx is not supported.
*/ */
ret = tb_switch_enable_clx(sw, TB_CL1); if (discovery) {
if (ret && ret != -EOPNOTSUPP) tb_sw_dbg(sw, "discovery, not touching CL states\n");
tb_sw_warn(sw, "failed to enable %s on upstream port\n", } else {
tb_switch_clx_name(TB_CL1)); ret = tb_switch_enable_clx(sw, TB_CL1);
if (ret && ret != -EOPNOTSUPP)
tb_sw_warn(sw, "failed to enable %s on upstream port\n",
tb_switch_clx_name(TB_CL1));
}
if (tb_switch_is_clx_enabled(sw, TB_CL1)) if (tb_switch_is_clx_enabled(sw, TB_CL1))
/* /*
......
...@@ -526,7 +526,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) ...@@ -526,7 +526,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
* Perform connection manager handshake between IN and OUT ports * Perform connection manager handshake between IN and OUT ports
* before capabilities exchange can take place. * before capabilities exchange can take place.
*/ */
ret = tb_dp_cm_handshake(in, out, 1500); ret = tb_dp_cm_handshake(in, out, 3000);
if (ret) if (ret)
return ret; return ret;
......
...@@ -1929,6 +1929,11 @@ static int dwc3_remove(struct platform_device *pdev) ...@@ -1929,6 +1929,11 @@ static int dwc3_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev); pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev); pm_runtime_put_noidle(&pdev->dev);
/*
* HACK: Clear the driver data, which is currently accessed by parent
* glue drivers, before allowing the parent to suspend.
*/
platform_set_drvdata(pdev, NULL);
pm_runtime_set_suspended(&pdev->dev); pm_runtime_set_suspended(&pdev->dev);
dwc3_free_event_buffers(dwc); dwc3_free_event_buffers(dwc);
......
...@@ -308,7 +308,16 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom) ...@@ -308,7 +308,16 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
/* Only usable in contexts where the role can not change. */ /* Only usable in contexts where the role can not change. */
static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom) static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
{ {
struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); struct dwc3 *dwc;
/*
* FIXME: Fix this layering violation.
*/
dwc = platform_get_drvdata(qcom->dwc3);
/* Core driver may not have probed yet. */
if (!dwc)
return false;
return dwc->xhci; return dwc->xhci;
} }
......
...@@ -198,6 +198,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, ...@@ -198,6 +198,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
list_del(&req->list); list_del(&req->list);
req->remaining = 0; req->remaining = 0;
req->needs_extra_trb = false; req->needs_extra_trb = false;
req->num_trbs = 0;
if (req->request.status == -EINPROGRESS) if (req->request.status == -EINPROGRESS)
req->request.status = status; req->request.status = status;
......
This diff is collapsed.
...@@ -2877,9 +2877,9 @@ static int renesas_usb3_probe(struct platform_device *pdev) ...@@ -2877,9 +2877,9 @@ static int renesas_usb3_probe(struct platform_device *pdev)
struct rzv2m_usb3drd *ddata = dev_get_drvdata(pdev->dev.parent); struct rzv2m_usb3drd *ddata = dev_get_drvdata(pdev->dev.parent);
usb3->drd_reg = ddata->reg; usb3->drd_reg = ddata->reg;
ret = devm_request_irq(ddata->dev, ddata->drd_irq, ret = devm_request_irq(&pdev->dev, ddata->drd_irq,
renesas_usb3_otg_irq, 0, renesas_usb3_otg_irq, 0,
dev_name(ddata->dev), usb3); dev_name(&pdev->dev), usb3);
if (ret < 0) if (ret < 0)
return ret; return ret;
} }
......
...@@ -248,6 +248,8 @@ static void option_instat_callback(struct urb *urb); ...@@ -248,6 +248,8 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_VENDOR_ID 0x2c7c #define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */ /* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121 #define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EM061K_LTA 0x0123
#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125 #define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_EG91 0x0191 #define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195 #define QUECTEL_PRODUCT_EG95 0x0195
...@@ -266,6 +268,8 @@ static void option_instat_callback(struct urb *urb); ...@@ -266,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_RM520N 0x0801 #define QUECTEL_PRODUCT_RM520N 0x0801
#define QUECTEL_PRODUCT_EC200U 0x0901 #define QUECTEL_PRODUCT_EC200U 0x0901
#define QUECTEL_PRODUCT_EC200S_CN 0x6002 #define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EM061K_LWW 0x6008
#define QUECTEL_PRODUCT_EM061K_LCN 0x6009
#define QUECTEL_PRODUCT_EC200T 0x6026 #define QUECTEL_PRODUCT_EC200T 0x6026
#define QUECTEL_PRODUCT_RM500K 0x7001 #define QUECTEL_PRODUCT_RM500K 0x7001
...@@ -1189,6 +1193,18 @@ static const struct usb_device_id option_ids[] = { ...@@ -1189,6 +1193,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LMS, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LTA, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LWW, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
......
...@@ -95,7 +95,7 @@ peak_current_show(struct device *dev, struct device_attribute *attr, char *buf) ...@@ -95,7 +95,7 @@ peak_current_show(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t static ssize_t
fast_role_swap_current_show(struct device *dev, struct device_attribute *attr, char *buf) fast_role_swap_current_show(struct device *dev, struct device_attribute *attr, char *buf)
{ {
return sysfs_emit(buf, "%u\n", to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3; return sysfs_emit(buf, "%u\n", (to_pdo(dev)->pdo >> PDO_FIXED_FRS_CURR_SHIFT) & 3);
} }
static DEVICE_ATTR_RO(fast_role_swap_current); static DEVICE_ATTR_RO(fast_role_swap_current);
......
...@@ -132,10 +132,8 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) ...@@ -132,10 +132,8 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
if (ret) if (ret)
return ret; return ret;
if (cci & UCSI_CCI_BUSY) { if (cmd != UCSI_CANCEL && cci & UCSI_CCI_BUSY)
ucsi->ops->async_write(ucsi, UCSI_CANCEL, NULL, 0); return ucsi_exec_command(ucsi, UCSI_CANCEL);
return -EBUSY;
}
if (!(cci & UCSI_CCI_COMMAND_COMPLETE)) if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
return -EIO; return -EIO;
...@@ -149,6 +147,11 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) ...@@ -149,6 +147,11 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
return ucsi_read_error(ucsi); return ucsi_read_error(ucsi);
} }
if (cmd == UCSI_CANCEL && cci & UCSI_CCI_CANCEL_COMPLETE) {
ret = ucsi_acknowledge_command(ucsi);
return ret ? ret : -EBUSY;
}
return UCSI_CCI_LENGTH(cci); return UCSI_CCI_LENGTH(cci);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment