Commit 59ee364b authored by Greg Kroah-Hartman's avatar Greg Kroah-Hartman

Merge tag 'thunderbolt-for-v5.10-rc1' of...

Merge tag 'thunderbolt-for-v5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v5.10 merge window

This includes following Thunderbolt/USB4 changes for v5.10 merge window:

  * A couple of optimizations around Tiger Lake force power logic and
    NHI (Native Host Interface) LC (Link Controller) mailbox command
    processing

  * Power management improvements for Software Connection Manager

  * Debugfs support

  * Allow KUnit tests to be enabled also when Thunderbolt driver is
    configured as module.

  * Few minor cleanups and fixes

All these have been in linux-next with no reported issues.

* tag 'thunderbolt-for-v5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (37 commits)
  thunderbolt: Capitalize comment on top of QUIRK_FORCE_POWER_LINK_CONTROLLER
  thunderbolt: Correct tb_check_quirks() kernel-doc
  thunderbolt: Log correct zeroX entries in decode_error()
  thunderbolt: Handle ERR_LOCK notification
  thunderbolt: Use "if USB4" instead of "depends on" in Kconfig
  thunderbolt: Allow KUnit tests to be built also when CONFIG_USB4=m
  thunderbolt: Only stop control channel when entering freeze
  thunderbolt: debugfs: Fix uninitialized return in counters_write()
  thunderbolt: Add debugfs interface
  thunderbolt: No need to warn in TB_CFG_ERROR_INVALID_CONFIG_SPACE
  thunderbolt: Introduce tb_switch_is_tiger_lake()
  thunderbolt: Introduce tb_switch_is_ice_lake()
  thunderbolt: Check for Intel vendor ID when identifying controller
  thunderbolt: Introduce tb_port_is_nhi()
  thunderbolt: Introduce tb_switch_next_cap()
  thunderbolt: Introduce tb_port_next_cap()
  thunderbolt: Move struct tb_cap_any to tb_regs.h
  thunderbolt: Add runtime PM for Software CM
  thunderbolt: Create device links from ACPI description
  ACPI: Export acpi_get_first_physical_node() to modules
  ...
parents bf1c6744 810278da
......@@ -551,6 +551,7 @@ struct device *acpi_get_first_physical_node(struct acpi_device *adev)
mutex_unlock(physical_node_lock);
return phys_dev;
}
EXPORT_SYMBOL_GPL(acpi_get_first_physical_node);
static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
const struct device *dev)
......
......@@ -3673,63 +3673,6 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
quirk_apple_poweroff_thunderbolt);
/*
 * Apple: Wait for the Thunderbolt controller to reestablish PCI tunnels
 *
 * During suspend the Thunderbolt controller is reset and all PCI
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. We have to manually wait for the NHI since there is
 * no parent child relationship between the NHI and the tunneled
 * bridges.
 */
static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
{
struct pci_dev *sibling = NULL;
struct pci_dev *nhi = NULL;
/* Quirk applies to Apple hardware only */
if (!x86_apple_machine)
return;
/* Only downstream ports can lead to tunneled bridges */
if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
return;
/*
 * Find the NHI and confirm that we are a bridge on the Thunderbolt
 * host controller and not on a Thunderbolt endpoint.
 */
sibling = pci_get_slot(dev->bus, 0x0);
if (sibling == dev)
goto out; /* we are the downstream bridge to the NHI */
if (!sibling || !sibling->subordinate)
goto out;
nhi = pci_get_slot(sibling->subordinate, 0x0);
if (!nhi)
goto out;
/* Accept only the known legacy Intel Thunderbolt NHIs */
if (nhi->vendor != PCI_VENDOR_ID_INTEL
|| (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
|| nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
goto out;
pci_info(dev, "quirk: waiting for Thunderbolt to reestablish PCI tunnels...\n");
device_pm_wait_for_dev(&dev->dev, &nhi->dev);
out:
/* Drop references taken by pci_get_slot(); both may be NULL here */
pci_dev_put(nhi);
pci_dev_put(sibling);
}
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
quirk_apple_wait_for_thunderbolt);
#endif
/*
......
......@@ -16,7 +16,19 @@ menuconfig USB4
To compile this driver as a module, choose M here. The module will be
called thunderbolt.
if USB4
config USB4_DEBUGFS_WRITE
bool "Enable write by debugfs to configuration spaces (DANGEROUS)"
help
Enables writing to device configuration registers through
debugfs interface.
Only enable this if you know what you are doing! Never enable
this for production systems or distro kernels.
config USB4_KUNIT_TEST
bool "KUnit tests"
depends on KUNIT=y
depends on USB4=y
endif # USB4
......@@ -4,4 +4,6 @@ thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o ee
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
thunderbolt-objs += nvm.o retimer.o quirks.o
obj-${CONFIG_USB4_KUNIT_TEST} += test.o
thunderbolt-${CONFIG_ACPI} += acpi.o
thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o
// SPDX-License-Identifier: GPL-2.0
/*
* ACPI support
*
* Copyright (C) 2020, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/acpi.h>
#include "tb.h"
/*
 * tb_acpi_add_link() - Namespace walk callback used by tb_acpi_add_links()
 *
 * Called for each ACPI device node. If the node carries a
 * "usb4-host-interface" property that references @data (the NHI), a
 * runtime PM device link is created from the tunneled PCIe/xHCI device
 * back to the NHI so the NHI resumes first. Always returns AE_OK so the
 * namespace walk continues.
 */
static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
				    void **return_value)
{
	struct fwnode_reference_args args;
	struct fwnode_handle *fwnode;
	struct tb_nhi *nhi = data;
	struct acpi_device *adev;
	struct pci_dev *pdev;
	struct device *dev;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	fwnode = acpi_fwnode_handle(adev);
	ret = fwnode_property_get_reference_args(fwnode, "usb4-host-interface",
						 NULL, 0, 0, &args);
	if (ret)
		return AE_OK;

	/* It needs to reference this NHI */
	if (nhi->pdev->dev.fwnode != args.fwnode)
		goto out_put;

	/*
	 * Try to find the physical device by walking up the hierarchy.
	 * We need to do this because the xHCI driver might not yet be
	 * bound so the USB3 SuperSpeed ports are not yet created.
	 */
	dev = acpi_get_first_physical_node(adev);
	while (!dev) {
		adev = adev->parent;
		if (!adev)
			break;
		dev = acpi_get_first_physical_node(adev);
	}

	if (!dev)
		goto out_put;

	/*
	 * Check that the device is PCIe. This is because USB3
	 * SuperSpeed ports have this property and they are not power
	 * managed with the xHCI and the SuperSpeed hub so we create the
	 * link from xHCI instead.
	 *
	 * Guard on dev here as well: without it dev_is_pci(NULL) would
	 * dereference a NULL pointer when no PCI ancestor exists.
	 */
	while (dev && !dev_is_pci(dev))
		dev = dev->parent;

	if (!dev)
		goto out_put;

	/*
	 * Check that this actually matches the type of device we
	 * expect. It should either be xHCI or PCIe root/downstream
	 * port.
	 */
	pdev = to_pci_dev(dev);
	if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI ||
	    (pci_is_pcie(pdev) &&
	     (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	      pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
		const struct device_link *link;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}

out_put:
	fwnode_handle_put(args.fwnode);
	return AE_OK;
}
/**
 * tb_acpi_add_links() - Add device links based on ACPI description
 * @nhi: Pointer to NHI
 *
 * Goes over ACPI namespace finding tunneled ports that reference the
 * @nhi ACPI node. For each reference a device link is added. The link
 * is automatically removed by the driver core.
 */
void tb_acpi_add_links(struct tb_nhi *nhi)
{
acpi_status status;
if (!has_acpi_companion(&nhi->pdev->dev))
return;
/*
 * Find all devices that have a "usb4-host-interface" property
 * referencing this NHI (walk limited to 32 namespace levels).
 */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 32,
tb_acpi_add_link, NULL, nhi, NULL);
if (ACPI_FAILURE(status))
dev_warn(&nhi->pdev->dev, "failed to enumerate tunneled ports\n");
}
......@@ -15,14 +15,6 @@
#define VSE_CAP_OFFSET_MAX 0xffff
#define TMU_ACCESS_EN BIT(20)
struct tb_cap_any {
union {
struct tb_cap_basic basic;
struct tb_cap_extended_short extended_short;
struct tb_cap_extended_long extended_long;
};
} __packed;
static int tb_port_enable_tmu(struct tb_port *port, bool enable)
{
struct tb_switch *sw = port->sw;
......@@ -67,23 +59,50 @@ static void tb_port_dummy_read(struct tb_port *port)
}
}
/**
 * tb_port_next_cap() - Return next capability in the linked list
 * @port: Port to find the capability for
 * @offset: Previous capability offset (%0 for start)
 *
 * Finds dword offset of the next capability in port config space
 * capability list and returns it. Passing %0 returns the first entry in
 * the capability list. If no next capability is found returns %0. In case
 * of failure returns negative errno.
 */
int tb_port_next_cap(struct tb_port *port, unsigned int offset)
{
struct tb_cap_any header;
int ret;
/* %0 means start of the list: first entry comes from port config */
if (!offset)
return port->config.first_cap_offset;
/* One dword is enough: the basic header carries the next pointer */
ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
if (ret)
return ret;
return header.basic.next;
}
static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
{
u32 offset = 1;
int offset = 0;
do {
struct tb_cap_any header;
int ret;
offset = tb_port_next_cap(port, offset);
if (offset < 0)
return offset;
ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
if (ret)
return ret;
if (header.basic.cap == cap)
return offset;
offset = header.basic.next;
} while (offset);
} while (offset > 0);
return -ENOENT;
}
......@@ -113,6 +132,50 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
return ret;
}
/**
 * tb_switch_next_cap() - Return next capability in the linked list
 * @sw: Switch to find the capability for
 * @offset: Previous capability offset (%0 for start)
 *
 * Finds dword offset of the next capability in router config space
 * capability list and returns it. Passing %0 returns the first entry in
 * the capability list. If no next capability is found returns %0. In case
 * of failure returns negative errno.
 */
int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset)
{
struct tb_cap_any header;
int ret;
/* %0 means start of the list: first entry comes from switch config */
if (!offset)
return sw->config.first_cap_offset;
/* Read two dwords so both short and long extended headers are covered */
ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
if (ret)
return ret;
switch (header.basic.cap) {
case TB_SWITCH_CAP_TMU:
ret = header.basic.next;
break;
case TB_SWITCH_CAP_VSE:
/*
 * Vendor specific extended capabilities come in two flavors;
 * a zero length in the short header means the long format
 * next pointer must be used instead.
 */
if (!header.extended_short.length)
ret = header.extended_long.next;
else
ret = header.extended_short.next;
break;
default:
tb_sw_dbg(sw, "unknown capability %#x at %#x\n",
header.basic.cap, offset);
ret = -EINVAL;
break;
}
/* Offsets at or past the maximum terminate the walk (return %0) */
return ret >= VSE_CAP_OFFSET_MAX ? 0 : ret;
}
/**
* tb_switch_find_cap() - Find switch capability
* @sw: Switch to find the capability for
......@@ -124,21 +187,23 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
*/
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
{
int offset = sw->config.first_cap_offset;
int offset = 0;
while (offset > 0 && offset < CAP_OFFSET_MAX) {
do {
struct tb_cap_any header;
int ret;
offset = tb_switch_next_cap(sw, offset);
if (offset < 0)
return offset;
ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
if (header.basic.cap == cap)
return offset;
offset = header.basic.next;
}
} while (offset);
return -ENOENT;
}
......@@ -155,37 +220,24 @@ int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
*/
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec)
{
struct tb_cap_any header;
int offset;
int offset = 0;
offset = tb_switch_find_cap(sw, TB_SWITCH_CAP_VSE);
if (offset < 0)
return offset;
while (offset > 0 && offset < VSE_CAP_OFFSET_MAX) {
do {
struct tb_cap_any header;
int ret;
ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
offset = tb_switch_next_cap(sw, offset);
if (offset < 0)
return offset;
ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
/*
* Extended vendor specific capabilities come in two
* flavors: short and long. The latter is used when
* offset is over 0xff.
*/
if (offset >= CAP_OFFSET_MAX) {
if (header.extended_long.vsec_id == vsec)
return offset;
offset = header.extended_long.next;
} else {
if (header.extended_short.vsec_id == vsec)
return offset;
if (!header.extended_short.length)
return -ENOENT;
offset = header.extended_short.next;
}
}
if (header.extended_short.cap == TB_SWITCH_CAP_VSE &&
header.extended_short.vsec_id == vsec)
return offset;
} while (offset);
return -ENOENT;
}
......@@ -219,6 +219,7 @@ static int check_config_address(struct tb_cfg_address addr,
static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
struct cfg_error_pkg *pkg = response->buffer;
struct tb_ctl *ctl = response->ctl;
struct tb_cfg_result res = { 0 };
res.response_route = tb_cfg_get_route(&pkg->header);
res.response_port = 0;
......@@ -227,9 +228,13 @@ static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
if (res.err)
return res;
WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
WARN(pkg->zero2, "pkg->zero1 is %#x\n", pkg->zero1);
WARN(pkg->zero3, "pkg->zero1 is %#x\n", pkg->zero1);
if (pkg->zero1)
tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
if (pkg->zero2)
tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
if (pkg->zero3)
tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);
res.err = 1;
res.tb_error = pkg->error;
res.response_port = pkg->port;
......@@ -266,9 +271,8 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
* Invalid cfg_space/offset/length combination in
* cfg_read/cfg_write.
*/
tb_ctl_WARN(ctl,
"CFG_ERROR(%llx:%x): Invalid config space or offset\n",
res->response_route, res->response_port);
tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
res->response_route, res->response_port);
return;
case TB_CFG_ERROR_NO_SUCH_PORT:
/*
......@@ -283,6 +287,10 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
res->response_route, res->response_port);
return;
case TB_CFG_ERROR_LOCK:
tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
res->response_route, res->response_port);
return;
default:
/* 5,6,7,9 and 11 are also valid error codes */
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
......@@ -951,6 +959,9 @@ static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
return -ENODEV;
tb_cfg_print_error(ctl, res);
if (res->tb_error == TB_CFG_ERROR_LOCK)
return -EACCES;
return -EIO;
}
......
This diff is collapsed.
......@@ -275,7 +275,7 @@ static struct attribute *domain_attrs[] = {
static umode_t domain_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct device *dev = kobj_to_dev(kobj);
struct tb *tb = container_of(dev, struct tb, dev);
if (attr == &dev_attr_boot_acl.attr) {
......@@ -455,6 +455,8 @@ int tb_domain_add(struct tb *tb)
/* This starts event processing */
mutex_unlock(&tb->lock);
device_init_wakeup(&tb->dev, true);
pm_runtime_no_callbacks(&tb->dev);
pm_runtime_set_active(&tb->dev);
pm_runtime_enable(&tb->dev);
......@@ -544,6 +546,33 @@ int tb_domain_suspend(struct tb *tb)
return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}
/**
 * tb_domain_freeze_noirq() - Handle noirq phase of hibernation freeze
 * @tb: Domain to freeze
 *
 * Calls the connection manager specific freeze_noirq hook (if any) and
 * on success stops the control channel. Returns %0 on success or the
 * hook's negative errno.
 */
int tb_domain_freeze_noirq(struct tb *tb)
{
int ret = 0;
mutex_lock(&tb->lock);
if (tb->cm_ops->freeze_noirq)
ret = tb->cm_ops->freeze_noirq(tb);
/* Stop the control channel only if the CM hook succeeded */
if (!ret)
tb_ctl_stop(tb->ctl);
mutex_unlock(&tb->lock);
return ret;
}
/**
 * tb_domain_thaw_noirq() - Handle noirq phase of hibernation thaw
 * @tb: Domain to thaw
 *
 * Restarts the control channel and then calls the connection manager
 * specific thaw_noirq hook (if any). Returns %0 on success or the
 * hook's negative errno.
 */
int tb_domain_thaw_noirq(struct tb *tb)
{
int ret = 0;
mutex_lock(&tb->lock);
/* Control channel must be running before the CM hook is called */
tb_ctl_start(tb->ctl);
if (tb->cm_ops->thaw_noirq)
ret = tb->cm_ops->thaw_noirq(tb);
mutex_unlock(&tb->lock);
return ret;
}
void tb_domain_complete(struct tb *tb)
{
if (tb->cm_ops->complete)
......@@ -798,12 +827,23 @@ int tb_domain_init(void)
{
int ret;
tb_test_init();
tb_debugfs_init();
ret = tb_xdomain_init();
if (ret)
return ret;
goto err_debugfs;
ret = bus_register(&tb_bus_type);
if (ret)
tb_xdomain_exit();
goto err_xdomain;
return 0;
err_xdomain:
tb_xdomain_exit();
err_debugfs:
tb_debugfs_exit();
tb_test_exit();
return ret;
}
......@@ -814,4 +854,6 @@ void tb_domain_exit(void)
ida_destroy(&tb_domain_ida);
tb_nvm_exit();
tb_xdomain_exit();
tb_debugfs_exit();
tb_test_exit();
}
......@@ -1635,11 +1635,14 @@ static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
static bool icm_tgl_is_supported(struct tb *tb)
{
u32 val;
/*
* If the firmware is not running use software CM. This platform
* should fully support both.
*/
return icm_firmware_running(tb->nhi);
val = ioread32(tb->nhi->iobase + REG_FW_STS);
return !!(val & REG_FW_STS_NVM_AUTH_DONE);
}
static void icm_handle_notification(struct work_struct *work)
......
......@@ -45,7 +45,7 @@ static int find_port_lc_cap(struct tb_port *port)
return sw->cap_lc + start + phys * size;
}
static int tb_lc_configure_lane(struct tb_port *port, bool configure)
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
bool upstream = tb_is_upstream_port(port);
struct tb_switch *sw = port->sw;
......@@ -69,7 +69,7 @@ static int tb_lc_configure_lane(struct tb_port *port, bool configure)
else
lane = TB_LC_SX_CTRL_L2C;
if (configure) {
if (configured) {
ctrl |= lane;
if (upstream)
ctrl |= TB_LC_SX_CTRL_UPSTREAM;
......@@ -83,55 +83,146 @@ static int tb_lc_configure_lane(struct tb_port *port, bool configure)
}
/**
* tb_lc_configure_link() - Let LC know about configured link
* @sw: Switch that is being added
* tb_lc_configure_port() - Let LC know about configured port
* @port: Port that is set as configured
*
* Informs LC of both parent switch and @sw that there is established
* link between the two.
* Sets the port configured for power management purposes.
*/
int tb_lc_configure_link(struct tb_switch *sw)
int tb_lc_configure_port(struct tb_port *port)
{
struct tb_port *up, *down;
int ret;
return tb_lc_set_port_configured(port, true);
}
/**
* tb_lc_unconfigure_port() - Let LC know about unconfigured port
* @port: Port that is set as configured
*
* Sets the port unconfigured for power management purposes.
*/
void tb_lc_unconfigure_port(struct tb_port *port)
{
tb_lc_set_port_configured(port, false);
}
if (!tb_route(sw) || tb_switch_is_icm(sw))
static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
{
struct tb_switch *sw = port->sw;
u32 ctrl, lane;
int cap, ret;
if (sw->generation < 2)
return 0;
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
cap = find_port_lc_cap(port);
if (cap < 0)
return cap;
/* Configure parent link toward this switch */
ret = tb_lc_configure_lane(down, true);
ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
if (ret)
return ret;
/* Configure upstream link from this switch to the parent */
ret = tb_lc_configure_lane(up, true);
/* Resolve correct lane */
if (port->port % 2)
lane = TB_LC_SX_CTRL_L1D;
else
lane = TB_LC_SX_CTRL_L2D;
if (configure)
ctrl |= lane;
else
ctrl &= ~lane;
return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}
/**
* tb_lc_configure_xdomain() - Inform LC that the link is XDomain
* @port: Switch downstream port connected to another host
*
* Sets the lane configured for XDomain accordingly so that the LC knows
* about this. Returns %0 in success and negative errno in failure.
*/
int tb_lc_configure_xdomain(struct tb_port *port)
{
return tb_lc_set_xdomain_configured(port, true);
}
/**
* tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
* @port: Switch downstream port that was connected to another host
*
* Unsets the lane XDomain configuration.
*/
void tb_lc_unconfigure_xdomain(struct tb_port *port)
{
tb_lc_set_xdomain_configured(port, false);
}
static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
unsigned int flags)
{
u32 ctrl;
int ret;
/*
* Enable wake on PCIe and USB4 (wake coming from another
* router).
*/
ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
offset + TB_LC_SX_CTRL, 1);
if (ret)
tb_lc_configure_lane(down, false);
return ret;
ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WOP |
TB_LC_SX_CTRL_WOU4);
if (flags & TB_WAKE_ON_CONNECT)
ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
if (flags & TB_WAKE_ON_USB4)
ctrl |= TB_LC_SX_CTRL_WOU4;
if (flags & TB_WAKE_ON_PCIE)
ctrl |= TB_LC_SX_CTRL_WOP;
return ret;
return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}
/**
* tb_lc_unconfigure_link() - Let LC know about unconfigured link
* @sw: Switch to unconfigure
* tb_lc_set_wake() - Enable/disable wake
* @sw: Switch whose wakes to configure
* @flags: Wakeup flags (%0 to disable)
*
* Informs LC of both parent switch and @sw that the link between the
* two does not exist anymore.
* For each LC sets wake bits accordingly.
*/
void tb_lc_unconfigure_link(struct tb_switch *sw)
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
struct tb_port *up, *down;
int start, size, nlc, ret, i;
u32 desc;
if (sw->is_unplugged || !tb_route(sw) || tb_switch_is_icm(sw))
return;
if (sw->generation < 2)
return 0;
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
if (!tb_route(sw))
return 0;
tb_lc_configure_lane(up, false);
tb_lc_configure_lane(down, false);
ret = read_lc_desc(sw, &desc);
if (ret)
return ret;
/* Figure out number of link controllers */
nlc = desc & TB_LC_DESC_NLC_MASK;
start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
/* For each link controller set sleep bit */
for (i = 0; i < nlc; i++) {
unsigned int offset = sw->cap_lc + start + i * size;
ret = tb_lc_set_wake_one(sw, offset, flags);
if (ret)
return ret;
}
return 0;
}
/**
......
......@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/platform_data/x86/apple.h>
#include "nhi.h"
#include "nhi_regs.h"
......@@ -863,6 +864,22 @@ static int nhi_suspend_noirq(struct device *dev)
return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
}
/* PM freeze_noirq callback: forward to the Thunderbolt domain */
static int nhi_freeze_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
return tb_domain_freeze_noirq(tb);
}
/* PM thaw_noirq callback: forward to the Thunderbolt domain */
static int nhi_thaw_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
return tb_domain_thaw_noirq(tb);
}
static bool nhi_wake_supported(struct pci_dev *pdev)
{
u8 val;
......@@ -1069,6 +1086,69 @@ static bool nhi_imr_valid(struct pci_dev *pdev)
return true;
}
/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static void tb_apple_add_links(struct tb_nhi *nhi)
{
struct pci_dev *upstream, *pdev;
/* Only needed on Apple hardware */
if (!x86_apple_machine)
return;
/* Only the legacy Intel discrete NHIs are affected */
switch (nhi->pdev->device) {
case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
break;
default:
return;
}
/* Walk up until the PCIe upstream port of the controller is found */
upstream = pci_upstream_bridge(nhi->pdev);
while (upstream) {
if (!pci_is_pcie(upstream))
return;
if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
break;
upstream = pci_upstream_bridge(upstream);
}
if (!upstream)
return;
/*
 * For each hotplug downstream port, add a device link back to
 * the NHI so that PCIe tunnels can be re-established after
 * sleep.
 */
for_each_pci_bridge(pdev, upstream->subordinate) {
const struct device_link *link;
if (!pci_is_pcie(pdev))
continue;
if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
!pdev->is_hotplug_bridge)
continue;
link = device_link_add(&pdev->dev, &nhi->pdev->dev,
DL_FLAG_AUTOREMOVE_SUPPLIER |
DL_FLAG_PM_RUNTIME);
if (link) {
dev_dbg(&nhi->pdev->dev, "created link from %s\n",
dev_name(&pdev->dev));
} else {
dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
dev_name(&pdev->dev));
}
}
}
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct tb_nhi *nhi;
......@@ -1134,6 +1214,9 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return res;
}
tb_apple_add_links(nhi);
tb_acpi_add_links(nhi);
tb = icm_probe(nhi);
if (!tb)
tb = tb_probe(nhi);
......@@ -1157,6 +1240,8 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
pci_set_drvdata(pdev, tb);
device_wakeup_enable(&pdev->dev);
pm_runtime_allow(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
......@@ -1186,14 +1271,13 @@ static void nhi_remove(struct pci_dev *pdev)
static const struct dev_pm_ops nhi_pm_ops = {
.suspend_noirq = nhi_suspend_noirq,
.resume_noirq = nhi_resume_noirq,
.freeze_noirq = nhi_suspend_noirq, /*
.freeze_noirq = nhi_freeze_noirq, /*
* we just disable hotplug, the
* pci-tunnels stay alive.
*/
.thaw_noirq = nhi_resume_noirq,
.thaw_noirq = nhi_thaw_noirq,
.restore_noirq = nhi_resume_noirq,
.suspend = nhi_suspend,
.freeze = nhi_suspend,
.poweroff_noirq = nhi_poweroff_noirq,
.poweroff = nhi_suspend,
.complete = nhi_complete,
......
......@@ -59,7 +59,7 @@ static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap);
if (power) {
unsigned int retries = 10;
unsigned int retries = 350;
u32 val;
/* Wait until the firmware tells it is up and running */
......@@ -67,7 +67,7 @@ static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
pci_read_config_dword(nhi->pdev, VS_CAP_9, &val);
if (val & VS_CAP_9_FW_READY)
return 0;
msleep(250);
usleep_range(3000, 3100);
} while (--retries);
return -ETIMEDOUT;
......@@ -97,7 +97,7 @@ static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout)
pci_read_config_dword(nhi->pdev, VS_CAP_18, &data);
if (data & VS_CAP_18_DONE)
goto clear;
msleep(100);
usleep_range(1000, 1100);
} while (time_before(jiffies, end));
return -ETIMEDOUT;
......@@ -121,31 +121,38 @@ static void icl_nhi_set_ltr(struct tb_nhi *nhi)
static int icl_nhi_suspend(struct tb_nhi *nhi)
{
struct tb *tb = pci_get_drvdata(nhi->pdev);
int ret;
if (icl_nhi_is_device_connected(nhi))
return 0;
/*
* If there is no device connected we need to perform both: a
* handshake through LC mailbox and force power down before
* entering D3.
*/
icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
if (ret)
return ret;
if (tb_switch_is_icm(tb->root_switch)) {
/*
* If there is no device connected we need to perform
* both: a handshake through LC mailbox and force power
* down before entering D3.
*/
icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
if (ret)
return ret;
}
return icl_nhi_force_power(nhi, false);
}
static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup)
{
struct tb *tb = pci_get_drvdata(nhi->pdev);
enum icl_lc_mailbox_cmd cmd;
if (!pm_suspend_via_firmware())
return icl_nhi_suspend(nhi);
if (!tb_switch_is_icm(tb->root_switch))
return 0;
cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE;
icl_nhi_lc_mailbox_cmd(nhi, cmd);
return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
......
......@@ -27,7 +27,7 @@ static const struct tb_quirk tb_quirks[] = {
* tb_check_quirks() - Check for quirks to apply
* @sw: Thunderbolt switch
*
* Apply any quirks for the Thunderbolt controller
* Apply any quirks for the Thunderbolt controller.
*/
void tb_check_quirks(struct tb_switch *sw)
{
......
......@@ -601,6 +601,13 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
if (credits == 0 || port->sw->is_unplugged)
return 0;
/*
* USB4 restricts programming NFC buffers to lane adapters only
* so skip other ports.
*/
if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
return 0;
nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
nfc_credits += credits;
......@@ -666,6 +673,50 @@ int tb_port_unlock(struct tb_port *port)
return 0;
}
/*
 * Clear (enable) or set (disable) the LANE_ADP_CS_1_LD bit of a lane
 * adapter. Non-lane adapters are rejected with -EINVAL via the
 * tb_port_is_null() check.
 */
static int __tb_port_enable(struct tb_port *port, bool enable)
{
int ret;
u32 phy;
if (!tb_port_is_null(port))
return -EINVAL;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (enable)
phy &= ~LANE_ADP_CS_1_LD;
else
phy |= LANE_ADP_CS_1_LD;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable it.
 */
int tb_port_enable(struct tb_port *port)
{
return __tb_port_enable(port, true);
}
/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable it.
 */
int tb_port_disable(struct tb_port *port)
{
return __tb_port_enable(port, false);
}
/**
* tb_init_port() - initialize a port
*
......@@ -739,7 +790,7 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
* NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are
* reserved.
*/
if (port->config.type != TB_TYPE_NHI && min_hopid < TB_PATH_MIN_HOPID)
if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
min_hopid = TB_PATH_MIN_HOPID;
if (max_hopid < 0 || max_hopid > port_max_hopid)
......@@ -1227,23 +1278,24 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
/**
* reset_switch() - reconfigure route, enable and send TB_CFG_PKG_RESET
* @sw: Switch to reset
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_switch_reset(struct tb *tb, u64 route)
int tb_switch_reset(struct tb_switch *sw)
{
struct tb_cfg_result res;
struct tb_regs_switch_header header = {
header.route_hi = route >> 32,
header.route_lo = route,
header.enabled = true,
};
tb_dbg(tb, "resetting switch at %llx\n", route);
res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
0, 2, 2, 2);
if (sw->generation > 1)
return 0;
tb_sw_dbg(sw, "resetting switch\n");
res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
TB_CFG_SWITCH, 2, 2);
if (res.err)
return res.err;
res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
if (res.err > 0)
return -EIO;
return res.err;
......@@ -1261,7 +1313,7 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
u32 data;
int res;
if (tb_switch_is_icm(sw))
if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
return 0;
sw->config.plug_events_delay = 0xff;
......@@ -1269,10 +1321,6 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
if (res)
return res;
/* Plug events are always enabled in USB4 */
if (tb_switch_is_usb4(sw))
return 0;
res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
if (res)
return res;
......@@ -1649,7 +1697,7 @@ static struct attribute *switch_attrs[] = {
static umode_t switch_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct device *dev = kobj_to_dev(kobj);
struct tb_switch *sw = tb_to_switch(dev);
if (attr == &dev_attr_device.attr) {
......@@ -1988,7 +2036,7 @@ int tb_switch_configure(struct tb_switch *sw)
route = tb_route(sw);
tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
sw->config.enabled ? "restoring " : "initializing", route,
sw->config.enabled ? "restoring" : "initializing", route,
tb_route_length(route), sw->config.upstream_port_number);
sw->config.enabled = 1;
......@@ -2008,10 +2056,6 @@ int tb_switch_configure(struct tb_switch *sw)
return ret;
ret = usb4_switch_setup(sw);
if (ret)
return ret;
ret = usb4_switch_configure_link(sw);
} else {
if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
tb_sw_warn(sw, "unknown switch vendor id %#x\n",
......@@ -2025,10 +2069,6 @@ int tb_switch_configure(struct tb_switch *sw)
/* Enumerate the switch */
ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
ROUTER_CS_1, 3);
if (ret)
return ret;
ret = tb_lc_configure_link(sw);
}
if (ret)
return ret;
......@@ -2311,6 +2351,69 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw)
tb_sw_dbg(sw, "lane bonding disabled\n");
}
/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Returns %0 on success and negative errno in case of error.
 */
int tb_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	/* Nothing to do for host routers (no route) or when ICM is in use */
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	/*
	 * Configure this router's end of the upstream link first. USB4
	 * routers use the USB4 port register, pre-USB4 routers go
	 * through the link controller.
	 */
	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	/* Then the parent router's end (may use a different mechanism) */
	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
}
/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;

	/* The router is already gone; its registers cannot be accessed */
	if (sw->is_unplugged)
		return;
	/* Nothing to do for host routers (no route) or when ICM is in use */
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	/* Unconfigure both ends of the upstream link; errors are ignored */
	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);
}
/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
......@@ -2399,6 +2502,13 @@ int tb_switch_add(struct tb_switch *sw)
return ret;
}
/*
* Thunderbolt routers do not generate wakeups themselves but
* they forward wakeups from tunneled protocols, so enable it
* here.
*/
device_init_wakeup(&sw->dev, true);
pm_runtime_set_active(&sw->dev);
if (sw->rpm) {
pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
......@@ -2408,6 +2518,7 @@ int tb_switch_add(struct tb_switch *sw)
pm_request_autosuspend(&sw->dev);
}
tb_switch_debugfs_init(sw);
return 0;
}
......@@ -2423,6 +2534,8 @@ void tb_switch_remove(struct tb_switch *sw)
{
struct tb_port *port;
tb_switch_debugfs_remove(sw);
if (sw->rpm) {
pm_runtime_get_sync(&sw->dev);
pm_runtime_disable(&sw->dev);
......@@ -2445,11 +2558,6 @@ void tb_switch_remove(struct tb_switch *sw)
if (!sw->is_unplugged)
tb_plug_events_active(sw, false);
if (tb_switch_is_usb4(sw))
usb4_switch_unconfigure_link(sw);
else
tb_lc_unconfigure_link(sw);
tb_switch_nvm_remove(sw);
if (tb_route(sw))
......@@ -2481,6 +2589,18 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
}
}
/*
 * Program the wake sources for @sw. @flags is a bitmask of TB_WAKE_ON_*
 * values; %0 disables all wakes. Dispatches to the USB4 or link
 * controller implementation depending on the router generation.
 */
static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (!flags)
		tb_sw_dbg(sw, "disabling wakeup\n");
	else
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);

	if (!tb_switch_is_usb4(sw))
		return tb_lc_set_wake(sw, flags);

	return usb4_switch_set_wake(sw, flags);
}
int tb_switch_resume(struct tb_switch *sw)
{
struct tb_port *port;
......@@ -2526,6 +2646,13 @@ int tb_switch_resume(struct tb_switch *sw)
if (err)
return err;
/* Disable wakes */
tb_switch_set_wake(sw, 0);
err = tb_switch_tmu_init(sw);
if (err)
return err;
/* check for surviving downstream switches */
tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port) && !port->xdomain)
......@@ -2555,20 +2682,43 @@ int tb_switch_resume(struct tb_switch *sw)
return 0;
}
void tb_switch_suspend(struct tb_switch *sw)
/**
* tb_switch_suspend() - Put a switch to sleep
* @sw: Switch to suspend
* @runtime: Is this runtime suspend or system sleep
*
* Suspends router and all its children. Enables wakes according to
* value of @runtime and then sets sleep bit for the router. If @sw is
* host router the domain is ready to go to sleep once this function
* returns.
*/
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
unsigned int flags = 0;
struct tb_port *port;
int err;
tb_sw_dbg(sw, "suspending switch\n");
err = tb_plug_events_active(sw, false);
if (err)
return;
tb_switch_for_each_port(sw, port) {
if (tb_port_has_remote(port))
tb_switch_suspend(port->remote->sw);
tb_switch_suspend(port->remote->sw, runtime);
}
if (runtime) {
/* Trigger wake when something is plugged in/out */
flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
} else if (device_may_wakeup(&sw->dev)) {
flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
}
tb_switch_set_wake(sw, flags);
if (tb_switch_is_usb4(sw))
usb4_switch_set_sleep(sw);
else
......
This diff is collapsed.
This diff is collapsed.
......@@ -28,6 +28,7 @@ enum tb_cfg_error {
TB_CFG_ERROR_LOOP = 8,
TB_CFG_ERROR_HEC_ERROR_DETECTED = 12,
TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
TB_CFG_ERROR_LOCK = 15,
};
/* common header */
......
......@@ -39,6 +39,7 @@ enum tb_switch_vse_cap {
enum tb_port_cap {
TB_PORT_CAP_PHY = 0x01,
TB_PORT_CAP_POWER = 0x02,
TB_PORT_CAP_TIME1 = 0x03,
TB_PORT_CAP_ADAP = 0x04,
TB_PORT_CAP_VSE = 0x05,
......@@ -93,6 +94,20 @@ struct tb_cap_extended_long {
u16 length;
} __packed;
/**
 * struct tb_cap_any - Structure capable of holding every capability
 * @basic: Basic capability
 * @extended_short: Vendor specific capability
 * @extended_long: Vendor specific extended capability
 */
struct tb_cap_any {
	union {
		struct tb_cap_basic basic;
		struct tb_cap_extended_short extended_short;
		struct tb_cap_extended_long extended_long;
	};
} __packed;
/* capabilities */
struct tb_cap_link_controller {
......@@ -178,6 +193,8 @@ struct tb_regs_switch_header {
#define ROUTER_CS_4 0x04
#define ROUTER_CS_5 0x05
#define ROUTER_CS_5_SLP BIT(0)
#define ROUTER_CS_5_WOP BIT(1)
#define ROUTER_CS_5_WOU BIT(2)
#define ROUTER_CS_5_C3S BIT(23)
#define ROUTER_CS_5_PTO BIT(24)
#define ROUTER_CS_5_UTO BIT(25)
......@@ -186,6 +203,8 @@ struct tb_regs_switch_header {
#define ROUTER_CS_6 0x06
#define ROUTER_CS_6_SLPR BIT(0)
#define ROUTER_CS_6_TNS BIT(1)
#define ROUTER_CS_6_WOPS BIT(2)
#define ROUTER_CS_6_WOUS BIT(3)
#define ROUTER_CS_6_HCI BIT(18)
#define ROUTER_CS_6_CR BIT(25)
#define ROUTER_CS_7 0x07
......@@ -234,7 +253,8 @@ struct tb_regs_port_header {
/* DWORD 1 */
u32 first_cap_offset:8;
u32 max_counters:11;
u32 __unknown1:5;
u32 counters_support:1;
u32 __unknown1:4;
u32 revision:8;
/* DWORD 2 */
enum tb_port_type type:24;
......@@ -279,6 +299,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
#define LANE_ADP_CS_1_LD BIT(14)
#define LANE_ADP_CS_1_LB BIT(15)
#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16)
#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16
......@@ -301,8 +322,13 @@ struct tb_regs_port_header {
#define PORT_CS_18 0x12
#define PORT_CS_18_BE BIT(8)
#define PORT_CS_18_TCM BIT(9)
#define PORT_CS_18_WOU4S BIT(18)
#define PORT_CS_19 0x13
#define PORT_CS_19_PC BIT(3)
#define PORT_CS_19_PID BIT(4)
#define PORT_CS_19_WOC BIT(16)
#define PORT_CS_19_WOD BIT(17)
#define PORT_CS_19_WOU4 BIT(18)
/* Display Port adapter registers */
#define ADP_DP_CS_0 0x00
......@@ -416,8 +442,14 @@ struct tb_regs_hop {
#define TB_LC_PORT_ATTR_BE BIT(12)
#define TB_LC_SX_CTRL 0x96
#define TB_LC_SX_CTRL_WOC BIT(1)
#define TB_LC_SX_CTRL_WOD BIT(2)
#define TB_LC_SX_CTRL_WOU4 BIT(5)
#define TB_LC_SX_CTRL_WOP BIT(6)
#define TB_LC_SX_CTRL_L1C BIT(16)
#define TB_LC_SX_CTRL_L1D BIT(17)
#define TB_LC_SX_CTRL_L2C BIT(20)
#define TB_LC_SX_CTRL_L2D BIT(21)
#define TB_LC_SX_CTRL_UPSTREAM BIT(30)
#define TB_LC_SX_CTRL_SLP BIT(31)
......
......@@ -1623,4 +1623,15 @@ static struct kunit_suite tb_test_suite = {
.name = "thunderbolt",
.test_cases = tb_test_cases,
};
kunit_test_suite(tb_test_suite);
static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };
/*
 * Registers the KUnit suite manually; used when the tests are built
 * into the thunderbolt module so kunit_test_suite() cannot be relied
 * on to run them automatically.
 */
int tb_test_init(void)
{
	return __kunit_test_suites_init(tb_test_suites);
}
/*
 * Tears down the manually registered KUnit suite.
 *
 * Note: __kunit_test_suites_exit() returns void, so its result must
 * not be used in a return statement — ISO C (6.8.6.4) forbids a
 * return with an expression in a function returning void.
 */
void tb_test_exit(void)
{
	__kunit_test_suites_exit(tb_test_suites);
}
......@@ -196,6 +196,46 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
return 0;
}
/*
 * Checks whether this router (or any of its connected downstream
 * ports) triggered a wake while the domain slept, and reports it to
 * the PM core via pm_wakeup_event() so the wake is attributed to the
 * right device.
 */
static void usb4_switch_check_wakes(struct tb_switch *sw)
{
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	/* Nothing to report if wakeup is not enabled for this device */
	if (!device_may_wakeup(&sw->dev))
		return;

	/* Device routers expose PCIe/USB3 wake status bits in ROUTER_CS_6 */
	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;
		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");
		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/* Check for any connected downstream ports for USB4 wake */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		/* A read failure stops the scan; wakes found so far still count */
		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;
		tb_port_dbg(port, "USB4 wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no");
		if (val & PORT_CS_18_WOU4S)
			wakeup = true;
	}

	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}
static bool link_is_usb4(struct tb_port *port)
{
u32 val;
......@@ -229,6 +269,8 @@ int usb4_switch_setup(struct tb_switch *sw)
u32 val = 0;
int ret;
usb4_switch_check_wakes(sw);
if (!tb_route(sw))
return 0;
......@@ -338,87 +380,99 @@ int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
usb4_switch_drom_read_block, sw);
}
static int usb4_set_port_configured(struct tb_port *port, bool configured)
/**
* usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
* @sw: USB4 router
*
* Checks whether conditions are met so that lane bonding can be
* established with the upstream router. Call only for device routers.
*/
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
{
struct tb_port *up;
int ret;
u32 val;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
up = tb_upstream_port(sw);
ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
if (ret)
return ret;
if (configured)
val |= PORT_CS_19_PC;
else
val &= ~PORT_CS_19_PC;
return false;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
return !!(val & PORT_CS_18_BE);
}
/**
* usb4_switch_configure_link() - Set upstream USB4 link configured
* usb4_switch_set_wake() - Enabled/disable wake
* @sw: USB4 router
* @flags: Wakeup flags (%0 to disable)
*
* Sets the upstream USB4 link to be configured for power management
* purposes.
* Enables/disables router to wake up from sleep.
*/
int usb4_switch_configure_link(struct tb_switch *sw)
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
struct tb_port *up;
struct tb_port *port;
u64 route = tb_route(sw);
u32 val;
int ret;
if (!tb_route(sw))
return 0;
/*
* Enable wakes coming from all USB4 downstream ports (from
* child routers). For device routers do this also for the
* upstream USB4 port.
*/
tb_switch_for_each_port(sw, port) {
if (!route && tb_is_upstream_port(port))
continue;
up = tb_upstream_port(sw);
return usb4_set_port_configured(up, true);
}
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
if (ret)
return ret;
/**
* usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration
* @sw: USB4 router
*
* Reverse of usb4_switch_configure_link().
*/
void usb4_switch_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up;
val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
if (sw->is_unplugged || !tb_route(sw))
return;
if (flags & TB_WAKE_ON_CONNECT)
val |= PORT_CS_19_WOC;
if (flags & TB_WAKE_ON_DISCONNECT)
val |= PORT_CS_19_WOD;
if (flags & TB_WAKE_ON_USB4)
val |= PORT_CS_19_WOU4;
up = tb_upstream_port(sw);
usb4_set_port_configured(up, false);
}
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
if (ret)
return ret;
}
/**
* usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
* @sw: USB4 router
*
* Checks whether conditions are met so that lane bonding can be
* established with the upstream router. Call only for device routers.
*/
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
{
struct tb_port *up;
int ret;
u32 val;
/*
* Enable wakes from PCIe and USB 3.x on this router. Only
* needed for device routers.
*/
if (route) {
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
return ret;
up = tb_upstream_port(sw);
ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
if (ret)
return false;
val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU);
if (flags & TB_WAKE_ON_USB3)
val |= ROUTER_CS_5_WOU;
if (flags & TB_WAKE_ON_PCIE)
val |= ROUTER_CS_5_WOP;
return !!(val & PORT_CS_18_BE);
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
return ret;
}
return 0;
}
/**
* usb4_switch_set_sleep() - Prepare the router to enter sleep
* @sw: USB4 router
*
* Enables wakes and sets sleep bit for the router. Returns when the
* router sleep ready bit has been asserted.
* Sets sleep bit for the router. Returns when the router sleep ready
* bit has been asserted.
*/
int usb4_switch_set_sleep(struct tb_switch *sw)
{
......@@ -795,6 +849,95 @@ int usb4_port_unlock(struct tb_port *port)
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}
/*
 * Reads PORT_CS_19 of @port, sets or clears the Port Configured (PC)
 * bit according to @configured and writes the register back. Returns
 * %0 on success and negative errno otherwise.
 */
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
	u32 cs19;
	int err;

	/* Only ports with a USB4 capability carry this register */
	if (!port->cap_usb4)
		return -EINVAL;

	err = tb_port_read(port, &cs19, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (err)
		return err;

	if (!configured)
		cs19 &= ~PORT_CS_19_PC;
	else
		cs19 |= PORT_CS_19_PC;

	return tb_port_write(port, &cs19, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}
/**
 * usb4_port_configure() - Set USB4 port configured
 * @port: USB4 port to set configured
 *
 * Sets the USB4 link to be configured for power management purposes.
 *
 * Returns %0 on success and negative errno in case of error.
 */
int usb4_port_configure(struct tb_port *port)
{
	return usb4_port_set_configured(port, true);
}
/**
 * usb4_port_unconfigure() - Set USB4 port unconfigured
 * @port: USB4 port to set unconfigured
 *
 * Sets the USB4 link to be unconfigured for power management purposes.
 * Any error from the underlying register write is intentionally
 * ignored.
 */
void usb4_port_unconfigure(struct tb_port *port)
{
	usb4_port_set_configured(port, false);
}
/*
 * Reads PORT_CS_19 of @port and updates the PID bit (marking the port
 * as connected to another host domain) according to @configured, then
 * writes the register back. Returns %0 on success and negative errno
 * otherwise.
 */
static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
{
	u32 cs19;
	int err;

	/* Only ports with a USB4 capability carry this register */
	if (!port->cap_usb4)
		return -EINVAL;

	err = tb_port_read(port, &cs19, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (err)
		return err;

	if (!configured)
		cs19 &= ~PORT_CS_19_PID;
	else
		cs19 |= PORT_CS_19_PID;

	return tb_port_write(port, &cs19, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}
/**
 * usb4_port_configure_xdomain() - Configure port for XDomain
 * @port: USB4 port connected to another host
 *
 * Marks the USB4 port as being connected to another host. Returns %0
 * on success and negative errno in failure.
 */
int usb4_port_configure_xdomain(struct tb_port *port)
{
	return usb4_set_xdomain_configured(port, true);
}
/**
 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
 * @port: USB4 port that was connected to another host
 *
 * Clears USB4 port from being marked as XDomain. Any error from the
 * underlying register write is intentionally ignored.
 */
void usb4_port_unconfigure_xdomain(struct tb_port *port)
{
	usb4_set_xdomain_configured(port, false);
}
static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
u32 value, int timeout_msec)
{
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment