Commit 0ec57cfa authored by Linus Torvalds

Merge tag 'usb-6.3-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb

Pull USB / Thunderbolt driver fixes from Greg KH:
 "Here are a small set of USB and Thunderbolt driver fixes for reported
  problems and a documentation update, for 6.3-rc4.

  Included in here are:

   - documentation update for uvc gadget driver

   - small thunderbolt driver fixes

   - cdns3 driver fixes

   - dwc3 driver fixes

   - dwc2 driver fixes

   - chipidea driver fixes

   - typec driver fixes

   - onboard_usb_hub device id updates

   - quirk updates

  All of these have been in linux-next with no reported problems"

* tag 'usb-6.3-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb: (30 commits)
  usb: dwc2: fix a race, don't power off/on phy for dual-role mode
  usb: dwc2: fix a devres leak in hw_enable upon suspend resume
  usb: chipidea: core: fix possible concurrent when switch role
  usb: chipdea: core: fix return -EINVAL if request role is the same with current role
  thunderbolt: Rename shadowed variables bit to interrupt_bit and auto_clear_bit
  thunderbolt: Disable interrupt auto clear for rings
  thunderbolt: Use const qualifier for `ring_interrupt_index`
  usb: gadget: Use correct endianness of the wLength field for WebUSB
  uas: Add US_FL_NO_REPORT_OPCODES for JMicron JMS583Gen 2
  usb: cdnsp: changes PCI Device ID to fix conflict with CNDS3 driver
  usb: cdns3: Fix issue with using incorrect PCI device function
  usb: cdnsp: Fixes issue with redundant Status Stage
  MAINTAINERS: make me a reviewer of USB/IP
  thunderbolt: Use scale field when allocating USB3 bandwidth
  thunderbolt: Limit USB3 bandwidth of certain Intel USB4 host routers
  thunderbolt: Call tb_check_quirks() after initializing adapters
  thunderbolt: Add missing UNSET_INBOUND_SBTX for retimer access
  thunderbolt: Fix memory leak in margining
  usb: dwc2: drd: fix inconsistent mode if role-switch-default-mode="host"
  docs: usb: Add documentation for the UVC Gadget
  ...
parents 18940c88 50213832
@@ -16,6 +16,7 @@ USB support
     gadget_multi
     gadget_printer
     gadget_serial
+    gadget_uvc
     gadget-testing
     iuu_phoenix
     mass-storage
......
@@ -21643,6 +21643,7 @@ USB OVER IP DRIVER
 M:	Valentina Manea <valentina.manea.m@gmail.com>
 M:	Shuah Khan <shuah@kernel.org>
 M:	Shuah Khan <skhan@linuxfoundation.org>
+R:	Hongren Zheng <i@zenithal.me>
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 F:	Documentation/usb/usbip_protocol.rst
......
@@ -942,6 +942,7 @@ static void margining_port_remove(struct tb_port *port)
 	snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
 	parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+	if (parent)
 		debugfs_remove_recursive(debugfs_lookup("margining", parent));
 
 	kfree(port->usb4->margining);
@@ -967,19 +968,18 @@ static void margining_switch_init(struct tb_switch *sw)
 static void margining_switch_remove(struct tb_switch *sw)
 {
+	struct tb_port *upstream, *downstream;
 	struct tb_switch *parent_sw;
-	struct tb_port *downstream;
 	u64 route = tb_route(sw);
 
 	if (!route)
 		return;
 
-	/*
-	 * Upstream is removed with the router itself but we need to
-	 * remove the downstream port margining directory.
-	 */
+	upstream = tb_upstream_port(sw);
 	parent_sw = tb_switch_parent(sw);
 	downstream = tb_port_at(route, parent_sw);
+	margining_port_remove(upstream);
 	margining_port_remove(downstream);
 }
......
@@ -46,7 +46,7 @@
 #define QUIRK_AUTO_CLEAR_INT	BIT(0)
 #define QUIRK_E2E		BIT(1)
 
-static int ring_interrupt_index(struct tb_ring *ring)
+static int ring_interrupt_index(const struct tb_ring *ring)
 {
 	int bit = ring->hop;
 	if (!ring->is_tx)
@@ -63,13 +63,14 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
 {
 	int reg = REG_RING_INTERRUPT_BASE +
 		  ring_interrupt_index(ring) / 32 * 4;
-	int bit = ring_interrupt_index(ring) & 31;
-	int mask = 1 << bit;
+	int interrupt_bit = ring_interrupt_index(ring) & 31;
+	int mask = 1 << interrupt_bit;
 	u32 old, new;
 
 	if (ring->irq > 0) {
 		u32 step, shift, ivr, misc;
 		void __iomem *ivr_base;
+		int auto_clear_bit;
 		int index;
 
 		if (ring->is_tx)
@@ -77,18 +78,25 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
 		else
 			index = ring->hop + ring->nhi->hop_count;
 
-		if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
-			/*
-			 * Ask the hardware to clear interrupt status
-			 * bits automatically since we already know
-			 * which interrupt was triggered.
-			 */
-			misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
-			if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
-				misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
-				iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
-			}
-		}
+		/*
+		 * Intel routers support a bit that isn't part of
+		 * the USB4 spec to ask the hardware to clear
+		 * interrupt status bits automatically since
+		 * we already know which interrupt was triggered.
+		 *
+		 * Other routers explicitly disable auto-clear
+		 * to prevent conditions that may occur where two
+		 * MSIX interrupts are simultaneously active and
+		 * reading the register clears both of them.
+		 */
+		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+		if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+			auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
+		else
+			auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
+		if (!(misc & auto_clear_bit))
+			iowrite32(misc | auto_clear_bit,
+				  ring->nhi->iobase + REG_DMA_MISC);
 
 		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
 		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
@@ -108,7 +116,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
 	dev_dbg(&ring->nhi->pdev->dev,
 		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
-		active ? "enabling" : "disabling", reg, bit, old, new);
+		active ? "enabling" : "disabling", reg, interrupt_bit, old, new);
 
 	if (new == old)
 		dev_WARN(&ring->nhi->pdev->dev,
@@ -393,13 +401,16 @@ EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
 static void ring_clear_msix(const struct tb_ring *ring)
 {
+	int bit;
+
 	if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
 		return;
 
+	bit = ring_interrupt_index(ring) & 31;
 	if (ring->is_tx)
-		ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
+		iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
 	else
-		ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
-			 4 * (ring->nhi->hop_count / 32));
+		iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
+			  4 * (ring->nhi->hop_count / 32));
 }
......
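For orientation, everything in the ring_interrupt_active()/ring_clear_msix() hunks above hangs off ring_interrupt_index(): TX rings occupy bits 0..hop_count-1 and RX rings follow at hop + hop_count, the 32-bit register is selected with index / 32 * 4 and the bit with index & 31, and the fix clears MSI-X interrupts by writing BIT(bit) to the new REG_RING_INT_CLEAR register instead of relying on read-to-clear. A minimal standalone sketch of that index arithmetic (plain userspace C with illustrative types, not the kernel driver):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the ring_interrupt_index() arithmetic shown above: returns which
 * 32-bit word (as a byte offset) and which bit within it belong to a ring.
 * The register base itself is whatever block the caller targets (enable
 * mask, status, or the new clear register). */
static void ring_bit_location(int hop, int is_tx, int hop_count,
			      unsigned int *word_offset, uint32_t *mask)
{
	int index = is_tx ? hop : hop + hop_count; /* RX rings follow TX rings */

	*word_offset = (index / 32) * 4;	/* 4 bytes per 32-bit register */
	*mask = 1u << (index & 31);		/* bit inside that register */
}

int main(void)
{
	unsigned int off;
	uint32_t mask;

	/* RX ring 0 on a controller with 12 hops lands in bit 12 of word 0 */
	ring_bit_location(0, 0, 12, &off, &mask);
	printf("RX ring 0 -> register offset %u, mask %#x\n", off, mask);
	return 0;
}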
@@ -77,12 +77,13 @@ struct ring_desc {
 /*
  * three bitfields: tx, rx, rx overflow
- * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
- * cleared on read. New interrupts are fired only after ALL registers have been
- * read (even those containing only disabled rings).
+ * Every bitfield contains one bit for every hop (REG_HOP_COUNT).
+ * New interrupts are fired only after ALL registers have been
+ * read (even those containing only disabled rings).
  */
 #define REG_RING_NOTIFY_BASE	0x37800
 #define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
+#define REG_RING_INT_CLEAR	0x37808
 
 /*
  * two bitfields: rx, tx
@@ -105,6 +106,7 @@ struct ring_desc {
 #define REG_DMA_MISC			0x39864
 #define REG_DMA_MISC_INT_AUTO_CLEAR	BIT(2)
+#define REG_DMA_MISC_DISABLE_AUTO_CLEAR	BIT(17)
 
 #define REG_INMAIL_DATA			0x39900
......
@@ -20,6 +20,25 @@ static void quirk_dp_credit_allocation(struct tb_switch *sw)
 	}
 }
 
+static void quirk_clx_disable(struct tb_switch *sw)
+{
+	sw->quirks |= QUIRK_NO_CLX;
+	tb_sw_dbg(sw, "disabling CL states\n");
+}
+
+static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
+{
+	struct tb_port *port;
+
+	tb_switch_for_each_port(sw, port) {
+		if (!tb_port_is_usb3_down(port))
+			continue;
+		port->max_bw = 16376;
+		tb_port_dbg(port, "USB3 maximum bandwidth limited to %u Mb/s\n",
+			    port->max_bw);
+	}
+}
+
 struct tb_quirk {
 	u16 hw_vendor_id;
 	u16 hw_device_id;
@@ -37,6 +56,31 @@ static const struct tb_quirk tb_quirks[] = {
 	 * DP buffers.
 	 */
 	{ 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation },
+	/*
+	 * Limit the maximum USB3 bandwidth for the following Intel USB4
+	 * host routers due to a hardware issue.
+	 */
+	{ 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI0, 0x0000, 0x0000,
+		  quirk_usb3_maximum_bandwidth },
+	{ 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI1, 0x0000, 0x0000,
+		  quirk_usb3_maximum_bandwidth },
+	{ 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI0, 0x0000, 0x0000,
+		  quirk_usb3_maximum_bandwidth },
+	{ 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI1, 0x0000, 0x0000,
+		  quirk_usb3_maximum_bandwidth },
+	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_M_NHI0, 0x0000, 0x0000,
+		  quirk_usb3_maximum_bandwidth },
+	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI0, 0x0000, 0x0000,
+		  quirk_usb3_maximum_bandwidth },
+	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
+		  quirk_usb3_maximum_bandwidth },
+	/*
+	 * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
+	 */
+	{ 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
+	{ 0x0438, 0x0209, 0x0000, 0x0000, quirk_clx_disable },
+	{ 0x0438, 0x020a, 0x0000, 0x0000, quirk_clx_disable },
+	{ 0x0438, 0x020b, 0x0000, 0x0000, quirk_clx_disable },
 };
 
 /**
......
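The hunk above only shows the quirk table entries and their hooks; the loop that applies them (tb_check_quirks()) is not part of this excerpt. The sketch below illustrates the general table-walk pattern under the assumption that 0x0000 fields act as wildcards, which is how the new entries (firmware version fields left at 0x0000) read; the types and the check_quirks() function here are simplified stand-ins, not the Thunderbolt driver's code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sw {
	uint16_t vendor;
	uint16_t device;
	unsigned int quirks;
};

struct quirk {
	uint16_t hw_vendor_id;
	uint16_t hw_device_id;
	void (*hook)(struct sw *sw);
};

/* stand-in for quirk_clx_disable(): just set a flag bit */
static void quirk_clx_disable(struct sw *sw)
{
	sw->quirks |= 1u << 1;
}

static const struct quirk quirks[] = {
	{ 0x0438, 0x0208, quirk_clx_disable },	/* AMD Yellow Carp, per the table above */
};

/* Walk the table; a zero vendor/device field is treated as "match anything". */
static void check_quirks(struct sw *sw)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (q->hw_vendor_id && q->hw_vendor_id != sw->vendor)
			continue;
		if (q->hw_device_id && q->hw_device_id != sw->device)
			continue;
		q->hook(sw);
	}
}

int main(void)
{
	struct sw yellow_carp = { .vendor = 0x0438, .device = 0x0208, .quirks = 0 };

	check_quirks(&yellow_carp);
	printf("quirks after matching: %#x\n", yellow_carp.quirks);
	return 0;
}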
@@ -187,6 +187,22 @@ static ssize_t nvm_authenticate_show(struct device *dev,
 	return ret;
 }
 
+static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
+{
+	int i;
+
+	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+		usb4_port_retimer_set_inbound_sbtx(port, i);
+}
+
+static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
+{
+	int i;
+
+	for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
+		usb4_port_retimer_unset_inbound_sbtx(port, i);
+}
+
 static ssize_t nvm_authenticate_store(struct device *dev,
 	struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -213,6 +229,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
 	rt->auth_status = 0;
 
 	if (val) {
+		tb_retimer_set_inbound_sbtx(rt->port);
 		if (val == AUTHENTICATE_ONLY) {
 			ret = tb_retimer_nvm_authenticate(rt, true);
 		} else {
@@ -232,6 +249,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
 	}
 
 exit_unlock:
+	tb_retimer_unset_inbound_sbtx(rt->port);
 	mutex_unlock(&rt->tb->lock);
 exit_rpm:
 	pm_runtime_mark_last_busy(&rt->dev);
@@ -440,8 +458,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
 	 * Enable sideband channel for each retimer. We can do this
 	 * regardless whether there is device connected or not.
 	 */
-	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
-		usb4_port_retimer_set_inbound_sbtx(port, i);
+	tb_retimer_set_inbound_sbtx(port);
 
 	/*
 	 * Before doing anything else, read the authentication status.
@@ -464,6 +481,8 @@ int tb_retimer_scan(struct tb_port *port, bool add)
 			break;
 	}
 
+	tb_retimer_unset_inbound_sbtx(port);
+
 	if (!last_idx)
 		return 0;
......
@@ -20,6 +20,7 @@ enum usb4_sb_opcode {
 	USB4_SB_OPCODE_ROUTER_OFFLINE = 0x4e45534c,		/* "LSEN" */
 	USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45,		/* "ENUM" */
 	USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c,		/* "LSUP" */
+	USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355,		/* "USUP" */
 	USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c,		/* "LAST" */
 	USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47,	/* "GNSS" */
 	USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42,		/* "BOPS" */
......
@@ -2968,8 +2968,6 @@ int tb_switch_add(struct tb_switch *sw)
 			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
 		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
 
-		tb_check_quirks(sw);
-
 		ret = tb_switch_set_uuid(sw);
 		if (ret) {
 			dev_err(&sw->dev, "failed to set UUID\n");
@@ -2988,6 +2986,8 @@ int tb_switch_add(struct tb_switch *sw)
 		}
 	}
 
+	tb_check_quirks(sw);
+
 	tb_switch_default_link_ports(sw);
 
 	ret = tb_switch_update_link_attributes(sw);
......
@@ -23,6 +23,11 @@
 #define NVM_MAX_SIZE		SZ_512K
 #define NVM_DATA_DWORDS		16
 
+/* Keep link controller awake during update */
+#define QUIRK_FORCE_POWER_LINK_CONTROLLER	BIT(0)
+/* Disable CLx if not supported */
+#define QUIRK_NO_CLX				BIT(1)
+
 /**
  * struct tb_nvm - Structure holding NVM information
  * @dev: Owner of the NVM
@@ -267,6 +272,8 @@ struct tb_bandwidth_group {
  * @group: Bandwidth allocation group the adapter is assigned to. Only
  *	   used for DP IN adapters for now.
  * @group_list: The adapter is linked to the group's list of ports through this
+ * @max_bw: Maximum possible bandwidth through this adapter if set to
+ *	    non-zero.
  *
  * In USB4 terminology this structure represents an adapter (protocol or
  * lane adapter).
@@ -294,6 +301,7 @@ struct tb_port {
 	unsigned int dma_credits;
 	struct tb_bandwidth_group *group;
 	struct list_head group_list;
+	unsigned int max_bw;
 };
 
 /**
@@ -1019,6 +1027,9 @@ static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw,
  */
 static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
 {
+	if (sw->quirks & QUIRK_NO_CLX)
+		return false;
+
 	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
 }
 
@@ -1234,6 +1245,7 @@ int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
 int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
 int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
+int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
 int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
 			   u8 size);
 int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
@@ -1291,9 +1303,6 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port);
 void usb4_port_device_remove(struct usb4_port *usb4);
 int usb4_port_device_resume(struct usb4_port *usb4);
 
-/* Keep link controller awake during update */
-#define QUIRK_FORCE_POWER_LINK_CONTROLLER	BIT(0)
-
 void tb_check_quirks(struct tb_switch *sw);
 
 #ifdef CONFIG_ACPI
......
@@ -1578,6 +1578,20 @@ int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
 			      500);
 }
 
+/**
+ * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * Disables sideband channel transations on SBTX. The reverse of
+ * usb4_port_retimer_set_inbound_sbtx().
+ */
+int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
+{
+	return usb4_port_retimer_op(port, index,
+				    USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
+}
+
 /**
  * usb4_port_retimer_read() - Read from retimer sideband registers
  * @port: USB4 port
@@ -1868,6 +1882,15 @@ int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
 				 usb4_port_retimer_nvm_read_block, &info);
 }
 
+static inline unsigned int
+usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
+{
+	/* Take the possible bandwidth limitation into account */
+	if (port->max_bw)
+		return min(bw, port->max_bw);
+	return bw;
+}
+
 /**
  * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
  * @port: USB3 adapter port
@@ -1889,7 +1912,9 @@ int usb4_usb3_port_max_link_rate(struct tb_port *port)
 		return ret;
 
 	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
-	return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
+	ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
+
+	return usb4_usb3_port_max_bandwidth(port, ret);
 }
 
 /**
@@ -1916,7 +1941,9 @@ int usb4_usb3_port_actual_link_rate(struct tb_port *port)
 		return 0;
 
 	lr = val & ADP_USB3_CS_4_ALR_MASK;
-	return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
+	ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
+
+	return usb4_usb3_port_max_bandwidth(port, ret);
 }
 
 static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
@@ -2067,18 +2094,30 @@ static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
 					     int downstream_bw)
 {
 	u32 val, ubw, dbw, scale;
-	int ret;
+	int ret, max_bw;
 
-	/* Read the used scale, hardware default is 0 */
-	ret = tb_port_read(port, &scale, TB_CFG_PORT,
-			   port->cap_adap + ADP_USB3_CS_3, 1);
+	/* Figure out suitable scale */
+	scale = 0;
+	max_bw = max(upstream_bw, downstream_bw);
+	while (scale < 64) {
+		if (mbps_to_usb3_bw(max_bw, scale) < 4096)
+			break;
+		scale++;
+	}
+
+	if (WARN_ON(scale >= 64))
+		return -EINVAL;
+
+	ret = tb_port_write(port, &scale, TB_CFG_PORT,
+			    port->cap_adap + ADP_USB3_CS_3, 1);
 	if (ret)
 		return ret;
 
-	scale &= ADP_USB3_CS_3_SCALE_MASK;
 	ubw = mbps_to_usb3_bw(upstream_bw, scale);
 	dbw = mbps_to_usb3_bw(downstream_bw, scale);
 
+	tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);
+
 	ret = tb_port_read(port, &val, TB_CFG_PORT,
 			   port->cap_adap + ADP_USB3_CS_2, 1);
 	if (ret)
......
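The usb4_usb3_port_write_allocated_bandwidth() hunk above replaces "read whatever scale the hardware happens to use" with "pick the smallest scale whose encoded value still fits in the register field (< 4096)". The loop below reproduces that selection with a stand-in scaled_units() helper: the driver's own mbps_to_usb3_bw() conversion is not shown in this excerpt, so the stand-in simply assumes each +1 of scale doubles the granularity (halving the encoded value), which is enough to see why the loop terminates and which scale it settles on:

#include <stdio.h>

/* Stand-in for the driver's mbps_to_usb3_bw(): assume one encoded unit is
 * (1 << scale) Mb/s, so raising the scale halves the encoded value. */
static unsigned int scaled_units(unsigned int mbps, unsigned int scale)
{
	return mbps >> scale;
}

int main(void)
{
	unsigned int upstream_bw = 20000;	/* Mb/s requested upstream */
	unsigned int downstream_bw = 16376;	/* Mb/s requested downstream */
	unsigned int max_bw = upstream_bw > downstream_bw ? upstream_bw : downstream_bw;
	unsigned int scale = 0;

	/* Same shape as the loop in the hunk: smallest scale that fits in 12 bits */
	while (scale < 64) {
		if (scaled_units(max_bw, scale) < 4096)
			break;
		scale++;
	}

	printf("picked scale %u: %u Mb/s encodes as %u units\n",
	       scale, max_bw, scaled_units(max_bw, scale));
	return 0;
}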
@@ -60,6 +60,11 @@ static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
 		return NULL;
 	}
 
+	if (func->devfn != PCI_DEV_FN_HOST_DEVICE &&
+	    func->devfn != PCI_DEV_FN_OTG) {
+		return NULL;
+	}
+
 	return func;
 }
......
@@ -403,20 +403,6 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
 	case USB_REQ_SET_ISOCH_DELAY:
 		ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
 		break;
-	case USB_REQ_SET_INTERFACE:
-		/*
-		 * Add request into pending list to block sending status stage
-		 * by libcomposite.
-		 */
-		list_add_tail(&pdev->ep0_preq.list,
-			      &pdev->ep0_preq.pep->pending_list);
-		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
-		if (ret == -EBUSY)
-			ret = 0;
-		list_del(&pdev->ep0_preq.list);
-		break;
 	default:
 		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
 		break;
@@ -474,9 +460,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
 	else
 		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
 
-	if (!len)
-		pdev->ep0_stage = CDNSP_STATUS_STAGE;
-
 	if (ret == USB_GADGET_DELAYED_STATUS) {
 		trace_cdnsp_ep0_status_stage("delayed");
 		return;
@@ -484,6 +467,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
 out:
 	if (ret < 0)
 		cdnsp_ep0_stall(pdev);
-	else if (pdev->ep0_stage == CDNSP_STATUS_STAGE)
+	else if (!len && pdev->ep0_stage != CDNSP_STATUS_STAGE)
 		cdnsp_status_stage(pdev);
 }
} }
@@ -29,30 +29,23 @@
 #define PLAT_DRIVER_NAME	"cdns-usbssp"
 
 #define CDNS_VENDOR_ID		0x17cd
-#define CDNS_DEVICE_ID		0x0100
+#define CDNS_DEVICE_ID		0x0200
+#define CDNS_DRD_ID		0x0100
 #define CDNS_DRD_IF		(PCI_CLASS_SERIAL_USB << 8 | 0x80)
 
 static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
 {
-	struct pci_dev *func;
-
 	/*
 	 * Gets the second function.
-	 * It's little tricky, but this platform has two function.
-	 * The fist keeps resources for Host/Device while the second
-	 * keeps resources for DRD/OTG.
+	 * Platform has two function. The fist keeps resources for
+	 * Host/Device while the secon keeps resources for DRD/OTG.
 	 */
-	func = pci_get_device(pdev->vendor, pdev->device, NULL);
-	if (!func)
-		return NULL;
-
-	if (func->devfn == pdev->devfn) {
-		func = pci_get_device(pdev->vendor, pdev->device, func);
-		if (!func)
-			return NULL;
-	}
-
-	return func;
+	if (pdev->device == CDNS_DEVICE_ID)
+		return pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL);
+	else if (pdev->device == CDNS_DRD_ID)
+		return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL);
+
+	return NULL;
 }
 
 static int cdnsp_pci_probe(struct pci_dev *pdev,
@@ -230,6 +223,8 @@ static const struct pci_device_id cdnsp_pci_ids[] = {
 		PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
 	{ PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
 		CDNS_DRD_IF, PCI_ANY_ID },
+	{ PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID,
+		CDNS_DRD_IF, PCI_ANY_ID },
 	{ 0, }
 };
......
@@ -208,6 +208,7 @@ struct hw_bank {
  * @in_lpm: if the core in low power mode
  * @wakeup_int: if wakeup interrupt occur
  * @rev: The revision number for controller
+ * @mutex: protect code from concorrent running when doing role switch
  */
 struct ci_hdrc {
 	struct device			*dev;
@@ -260,6 +261,7 @@ struct ci_hdrc {
 	bool				in_lpm;
 	bool				wakeup_int;
 	enum ci_revision		rev;
+	struct mutex			mutex;
 };
 
 static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
......
@@ -984,9 +984,16 @@ static ssize_t role_store(struct device *dev,
 			     strlen(ci->roles[role]->name)))
 			break;
 
-	if (role == CI_ROLE_END || role == ci->role)
+	if (role == CI_ROLE_END)
 		return -EINVAL;
 
+	mutex_lock(&ci->mutex);
+
+	if (role == ci->role) {
+		mutex_unlock(&ci->mutex);
+		return n;
+	}
+
 	pm_runtime_get_sync(dev);
 	disable_irq(ci->irq);
 	ci_role_stop(ci);
@@ -995,6 +1002,7 @@ static ssize_t role_store(struct device *dev,
 		ci_handle_vbus_change(ci);
 	enable_irq(ci->irq);
 	pm_runtime_put_sync(dev);
+	mutex_unlock(&ci->mutex);
 
 	return (ret == 0) ? n : ret;
 }
@@ -1030,6 +1038,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	spin_lock_init(&ci->lock);
+	mutex_init(&ci->mutex);
 	ci->dev = dev;
 	ci->platdata = dev_get_platdata(dev);
 	ci->imx28_write_fix = !!(ci->platdata->flags &
......
@@ -167,8 +167,10 @@ static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)
 void ci_handle_id_switch(struct ci_hdrc *ci)
 {
-	enum ci_role role = ci_otg_role(ci);
+	enum ci_role role;
 
+	mutex_lock(&ci->mutex);
+	role = ci_otg_role(ci);
 	if (role != ci->role) {
 		dev_dbg(ci->dev, "switching from %s to %s\n",
 			ci_role(ci)->name, ci->roles[role]->name);
@@ -198,6 +200,7 @@ void ci_handle_id_switch(struct ci_hdrc *ci)
 		if (role == CI_ROLE_GADGET)
 			ci_handle_vbus_change(ci);
 	}
+	mutex_unlock(&ci->mutex);
 }
 
 /**
  * ci_otg_work - perform otg (vbus/id) event handle
......
@@ -35,7 +35,8 @@ static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
 	spin_unlock_irqrestore(&hsotg->lock, flags);
 
-	dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST));
+	dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST) ||
+				(hsotg->role_sw_default_mode == USB_DR_MODE_HOST));
 }
 
 static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
......
@@ -4549,8 +4549,7 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
 	hsotg->gadget.dev.of_node = hsotg->dev->of_node;
 	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
 
-	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
-	    (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg))) {
+	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
 		ret = dwc2_lowlevel_hw_enable(hsotg);
 		if (ret)
 			goto err;
@@ -4612,8 +4611,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
 	if (!IS_ERR_OR_NULL(hsotg->uphy))
 		otg_set_peripheral(hsotg->uphy->otg, NULL);
 
-	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
-	    (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)))
+	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
 		dwc2_lowlevel_hw_disable(hsotg);
 
 	return 0;
......
@@ -91,13 +91,6 @@ static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg)
 	return 0;
 }
 
-static void __dwc2_disable_regulators(void *data)
-{
-	struct dwc2_hsotg *hsotg = data;
-
-	regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
-}
-
 static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
 {
 	struct platform_device *pdev = to_platform_device(hsotg->dev);
@@ -108,11 +101,6 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
 	if (ret)
 		return ret;
 
-	ret = devm_add_action_or_reset(&pdev->dev,
-				       __dwc2_disable_regulators, hsotg);
-	if (ret)
-		return ret;
-
 	if (hsotg->clk) {
 		ret = clk_prepare_enable(hsotg->clk);
 		if (ret)
@@ -168,7 +156,7 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
 	if (hsotg->clk)
 		clk_disable_unprepare(hsotg->clk);
 
-	return 0;
+	return regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
 }
 
 /**
@@ -576,8 +564,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
 	dwc2_debugfs_init(hsotg);
 
 	/* Gadget code manages lowlevel hw on its own */
-	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
-	    (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)))
+	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
 		dwc2_lowlevel_hw_disable(hsotg);
 
 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
@@ -608,7 +595,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
 	if (hsotg->params.activate_stm_id_vb_detection)
 		regulator_disable(hsotg->usb33d);
 error:
-	if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL)
+	if (hsotg->ll_hw_enabled)
 		dwc2_lowlevel_hw_disable(hsotg);
 	return retval;
 }
......
@@ -1098,7 +1098,7 @@ struct dwc3_scratchpad_array {
  *			change quirk.
  * @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
  *			check during HS transmit.
- * @resume-hs-terminations: Set if we enable quirk for fixing improper crc
+ * @resume_hs_terminations: Set if we enable quirk for fixing improper crc
  *			generation after resume from suspend.
  * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
  *			instances in park mode.
......
@@ -1699,6 +1699,7 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
  */
 static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
 {
+	struct dwc3 *dwc = dep->dwc;
 	struct dwc3_gadget_ep_cmd_params params;
 	u32 cmd;
 	int ret;
@@ -1722,10 +1723,13 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
 	WARN_ON_ONCE(ret);
 	dep->resource_index = 0;
 
-	if (!interrupt)
+	if (!interrupt) {
+		if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
+			mdelay(1);
 		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
-	else if (!ret)
+	} else if (!ret) {
 		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+	}
 
 	dep->flags &= ~DWC3_EP_DELAY_STOP;
 	return ret;
@@ -3774,7 +3778,11 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
 	 * enabled, the EndTransfer command will have completed upon
 	 * returning from this function.
 	 *
-	 * This mode is NOT available on the DWC_usb31 IP.
+	 * This mode is NOT available on the DWC_usb31 IP.  In this
+	 * case, if the IOC bit is not set, then delay by 1ms
+	 * after issuing the EndTransfer command.  This allows for the
+	 * controller to handle the command completely before DWC3
+	 * remove requests attempts to unmap USB request buffers.
 	 */
 
 	__dwc3_stop_active_transfer(dep, force, interrupt);
......
@@ -2079,9 +2079,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
 				sizeof(url_descriptor->URL)
 				- WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset);
 
-			if (ctrl->wLength < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH
-					+ landing_page_length)
-				landing_page_length = ctrl->wLength
+			if (w_length < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_length)
+				landing_page_length = w_length
 				- WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset;
 
 			memcpy(url_descriptor->URL,
......
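The composite.c hunk above is the WebUSB endianness fix from the shortlog: wLength arrives little-endian on the wire, so comparing the raw __le16 value only happens to work on little-endian CPUs, and the patch switches to the already-converted w_length. A standalone sketch (plain C, not the gadget stack) of decoding that field explicitly from the 8-byte setup packet:

#include <stdint.h>
#include <stdio.h>

/* Bytes 6 and 7 of the 8-byte SETUP packet carry wLength, least significant byte first. */
static uint16_t wlength_from_setup(const uint8_t setup[8])
{
	return (uint16_t)setup[6] | ((uint16_t)setup[7] << 8);
}

int main(void)
{
	/* illustrative vendor request whose wLength is 0x0102 (258) bytes */
	uint8_t setup[8] = { 0xc0, 0x02, 0x01, 0x00, 0x00, 0x00, 0x02, 0x01 };
	uint16_t w_length = wlength_from_setup(setup);

	/* prints 258; reading the two bytes in the wrong order would give 513 */
	printf("host asked for %u bytes\n", w_length);
	return 0;
}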
@@ -1422,7 +1422,7 @@ void g_audio_cleanup(struct g_audio *g_audio)
 	uac = g_audio->uac;
 	card = uac->card;
 	if (card)
-		snd_card_free(card);
+		snd_card_free_when_closed(card);
 
 	kfree(uac->p_prm.reqs);
 	kfree(uac->c_prm.reqs);
......
@@ -410,6 +410,7 @@ static const struct usb_device_id onboard_hub_id_table[] = {
 	{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
 	{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
 	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
+	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
 	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
 	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
 	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
......
@@ -36,6 +36,7 @@ static const struct onboard_hub_pdata vialab_vl817_data = {
 static const struct of_device_id onboard_hub_match[] = {
 	{ .compatible = "usb424,2514", .data = &microchip_usb424_data, },
+	{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
 	{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
 	{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
 	{ .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
......
@@ -111,6 +111,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_BROKEN_FUA),
 
+/* Reported by: Yaroslav Furman <yaro330@gmail.com> */
+UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999,
+		"JMicron",
+		"JMS583Gen 2",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_REPORT_OPCODES),
+
 /* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
 UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
 		"PNY",
......
@@ -1445,10 +1445,18 @@ static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
 static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
 			   const u32 *data, int cnt)
 {
+	u32 vdo_hdr = port->vdo_data[0];
+
 	WARN_ON(!mutex_is_locked(&port->lock));
 
-	/* Make sure we are not still processing a previous VDM packet */
-	WARN_ON(port->vdm_state > VDM_STATE_DONE);
+	/* If is sending discover_identity, handle received message first */
+	if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
+		port->send_discover = true;
+		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
+	} else {
+		/* Make sure we are not still processing a previous VDM packet */
+		WARN_ON(port->vdm_state > VDM_STATE_DONE);
+	}
 
 	port->vdo_count = cnt + 1;
 	port->vdo_data[0] = header;
@@ -1948,11 +1956,13 @@ static void vdm_run_state_machine(struct tcpm_port *port)
 		switch (PD_VDO_CMD(vdo_hdr)) {
 		case CMD_DISCOVER_IDENT:
 			res = tcpm_ams_start(port, DISCOVER_IDENTITY);
-			if (res == 0)
+			if (res == 0) {
 				port->send_discover = false;
-			else if (res == -EAGAIN)
+			} else if (res == -EAGAIN) {
+				port->vdo_data[0] = 0;
 				mod_send_discover_delayed_work(port,
 							       SEND_DISCOVER_RETRY_MS);
+			}
 			break;
 		case CMD_DISCOVER_SVID:
 			res = tcpm_ams_start(port, DISCOVER_SVIDS);
@@ -2035,6 +2045,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
 			unsigned long timeout;
 
 			port->vdm_retries = 0;
+			port->vdo_data[0] = 0;
 			port->vdm_state = VDM_STATE_BUSY;
 			timeout = vdm_ready_timeout(vdo_hdr);
 			mod_vdm_delayed_work(port, timeout);
@@ -4570,6 +4581,9 @@ static void run_state_machine(struct tcpm_port *port)
 	case SOFT_RESET:
 		port->message_id = 0;
 		port->rx_msgid = -1;
+		/* remove existing capabilities */
+		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+		port->partner_source_caps = NULL;
 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
 		tcpm_ams_finish(port);
 		if (port->pwr_role == TYPEC_SOURCE) {
@@ -4589,6 +4603,9 @@ static void run_state_machine(struct tcpm_port *port)
 	case SOFT_RESET_SEND:
 		port->message_id = 0;
 		port->rx_msgid = -1;
+		/* remove existing capabilities */
+		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+		port->partner_source_caps = NULL;
 		if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
 			tcpm_set_state_cond(port, hard_reset_state(port), 0);
 		else
@@ -4718,6 +4735,9 @@ static void run_state_machine(struct tcpm_port *port)
 		tcpm_set_state(port, SNK_STARTUP, 0);
 		break;
 	case PR_SWAP_SNK_SRC_SINK_OFF:
+		/* will be source, remove existing capabilities */
+		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+		port->partner_source_caps = NULL;
 		/*
 		 * Prevent vbus discharge circuit from turning on during PR_SWAP
 		 * as this is not a disconnect.
......
@@ -1125,12 +1125,11 @@ static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
 	return NULL;
 }
 
-static int ucsi_register_port(struct ucsi *ucsi, int index)
+static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
 {
 	struct usb_power_delivery_desc desc = { ucsi->cap.pd_version};
 	struct usb_power_delivery_capabilities_desc pd_caps;
 	struct usb_power_delivery_capabilities *pd_cap;
-	struct ucsi_connector *con = &ucsi->connector[index];
 	struct typec_capability *cap = &con->typec_cap;
 	enum typec_accessory *accessory = cap->accessory;
 	enum usb_role u_role = USB_ROLE_NONE;
@@ -1151,7 +1150,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
 	init_completion(&con->complete);
 	mutex_init(&con->lock);
 	INIT_LIST_HEAD(&con->partner_tasks);
-	con->num = index + 1;
 	con->ucsi = ucsi;
 
 	cap->fwnode = ucsi_find_fwnode(con);
@@ -1328,8 +1326,8 @@
  */
 static int ucsi_init(struct ucsi *ucsi)
 {
-	struct ucsi_connector *con;
-	u64 command;
+	struct ucsi_connector *con, *connector;
+	u64 command, ntfy;
 	int ret;
 	int i;
@@ -1341,8 +1339,8 @@ static int ucsi_init(struct ucsi *ucsi)
 	}
 
 	/* Enable basic notifications */
-	ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
-	command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+	ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+	command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
 	ret = ucsi_send_command(ucsi, command, NULL, 0);
 	if (ret < 0)
 		goto err_reset;
@@ -1359,31 +1357,33 @@ static int ucsi_init(struct ucsi *ucsi)
 	}
 
 	/* Allocate the connectors. Released in ucsi_unregister() */
-	ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
-				  sizeof(*ucsi->connector), GFP_KERNEL);
-	if (!ucsi->connector) {
+	connector = kcalloc(ucsi->cap.num_connectors + 1, sizeof(*connector), GFP_KERNEL);
+	if (!connector) {
 		ret = -ENOMEM;
 		goto err_reset;
 	}
 
 	/* Register all connectors */
 	for (i = 0; i < ucsi->cap.num_connectors; i++) {
-		ret = ucsi_register_port(ucsi, i);
+		connector[i].num = i + 1;
+		ret = ucsi_register_port(ucsi, &connector[i]);
 		if (ret)
 			goto err_unregister;
 	}
 
 	/* Enable all notifications */
-	ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
-	command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+	ntfy = UCSI_ENABLE_NTFY_ALL;
+	command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
 	ret = ucsi_send_command(ucsi, command, NULL, 0);
 	if (ret < 0)
 		goto err_unregister;
 
+	ucsi->connector = connector;
+	ucsi->ntfy = ntfy;
 	return 0;
 
 err_unregister:
-	for (con = ucsi->connector; con->port; con++) {
+	for (con = connector; con->port; con++) {
 		ucsi_unregister_partner(con);
 		ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
 		ucsi_unregister_port_psy(con);
@@ -1399,10 +1399,7 @@ static int ucsi_init(struct ucsi *ucsi)
 		typec_unregister_port(con->port);
 		con->port = NULL;
 	}
+	kfree(connector);
 
-	kfree(ucsi->connector);
-	ucsi->connector = NULL;
-
 err_reset:
 	memset(&ucsi->cap, 0, sizeof(ucsi->cap));
 	ucsi_reset_ppm(ucsi);
......
@@ -78,7 +78,7 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
 	if (ret)
 		goto out_clear_bit;
 
-	if (!wait_for_completion_timeout(&ua->complete, HZ))
+	if (!wait_for_completion_timeout(&ua->complete, 5 * HZ))
 		ret = -ETIMEDOUT;
 
 out_clear_bit:
......