Commit aed1a2a5 authored by Greg Kroah-Hartman

Merge tag 'thunderbolt-for-v6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v6.5 merge window

This includes the following Thunderbolt/USB4 changes for the v6.5 merge
window:

  - Improve debug logging
  - Rework of TMU and CL states handling
  - Retimer access improvements
  - Initial support for USB4 v2 features:

    * 80G symmetric link support
    * New notifications
    * PCIe extended encapsulation
    * Enhanced uni-directional TMU mode
    * CL2 link low power state
    * DisplayPort 2.x tunneling

  - Support for Intel Barlow Ridge Thunderbolt/USB4 controller
  - Minor fixes and improvements.

All these have been in linux-next with no reported issues.

* tag 'thunderbolt-for-v6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (55 commits)
  thunderbolt: Add test case for 3 DisplayPort tunnels
  thunderbolt: Add DisplayPort 2.x tunneling support
  thunderbolt: Make bandwidth allocation mode function names consistent
  thunderbolt: Enable CL2 low power state
  thunderbolt: Add support for enhanced uni-directional TMU mode
  thunderbolt: Increase NVM_MAX_SIZE to support Intel Barlow Ridge controller
  thunderbolt: Move constants related to NVM into nvm.c
  thunderbolt: Limit Intel Barlow Ridge USB3 bandwidth
  thunderbolt: Add Intel Barlow Ridge PCI ID
  thunderbolt: Fix PCIe adapter capability length for USB4 v2 routers
  thunderbolt: Fix DisplayPort IN adapter capability length for USB4 v2 routers
  thunderbolt: Add two additional double words for adapters TMU for USB4 v2 routers
  thunderbolt: Enable USB4 v2 PCIe TLP/DLLP extended encapsulation
  thunderbolt: Announce USB4 v2 connection manager support
  thunderbolt: Reset USB4 v2 host router
  thunderbolt: Add the new USB4 v2 notification types
  thunderbolt: Add support for USB4 v2 80 Gb/s link
  thunderbolt: Identify USB4 v2 routers
  thunderbolt: Do not touch lane 1 adapter path config space
  thunderbolt: Ignore data CRC mismatch for USB4 routers
  ...
parents 99f2d956 481012b4
......@@ -2,7 +2,7 @@
obj-${CONFIG_USB4} := thunderbolt.o
thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o
thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o clx.o
thunderbolt-${CONFIG_ACPI} += acpi.o
thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
......
......@@ -296,16 +296,15 @@ static bool tb_acpi_bus_match(struct device *dev)
static struct acpi_device *tb_acpi_switch_find_companion(struct tb_switch *sw)
{
struct tb_switch *parent_sw = tb_switch_parent(sw);
struct acpi_device *adev = NULL;
struct tb_switch *parent_sw;
/*
* Device routers exist under the downstream facing USB4 port
* of the parent router. Their _ADR is always 0.
*/
parent_sw = tb_switch_parent(sw);
if (parent_sw) {
struct tb_port *port = tb_port_at(tb_route(sw), parent_sw);
struct tb_port *port = tb_switch_downstream_port(sw);
struct acpi_device *port_adev;
port_adev = acpi_find_child_by_adr(ACPI_COMPANION(&parent_sw->dev),
......
// SPDX-License-Identifier: GPL-2.0
/*
* CLx support
*
* Copyright (C) 2020 - 2023, Intel Corporation
* Authors: Gil Fine <gil.fine@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/module.h>
#include "tb.h"
static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
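Since clx is a 0444 parameter of the thunderbolt module, it can only be set at module load time. As a usage note (not part of this commit), disabling the CL states for debugging from the kernel command line looks like:

	thunderbolt.clx=0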
static const char *clx_name(unsigned int clx)
{
switch (clx) {
case TB_CL0S | TB_CL1 | TB_CL2:
return "CL0s/CL1/CL2";
case TB_CL1 | TB_CL2:
return "CL1/CL2";
case TB_CL0S | TB_CL2:
return "CL0s/CL2";
case TB_CL0S | TB_CL1:
return "CL0s/CL1";
case TB_CL0S:
return "CL0s";
case 0:
return "disabled";
default:
return "unknown";
}
}
static int tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
u32 phy;
int ret;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (secondary)
phy |= LANE_ADP_CS_1_PMS;
else
phy &= ~LANE_ADP_CS_1_PMS;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_pm_secondary_enable(struct tb_port *port)
{
return tb_port_pm_secondary_set(port, true);
}
static int tb_port_pm_secondary_disable(struct tb_port *port)
{
return tb_port_pm_secondary_set(port, false);
}
/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx)
{
u32 val, mask = 0;
bool ret;
/* Don't enable CLx in case of two single-lane links */
if (!port->bonded && port->dual_link_port)
return false;
/* Don't enable CLx in case of inter-domain link */
if (port->xdomain)
return false;
if (tb_switch_is_usb4(port->sw)) {
if (!usb4_port_clx_supported(port))
return false;
} else if (!tb_lc_is_clx_supported(port)) {
return false;
}
if (clx & TB_CL0S)
mask |= LANE_ADP_CS_0_CL0S_SUPPORT;
if (clx & TB_CL1)
mask |= LANE_ADP_CS_0_CL1_SUPPORT;
if (clx & TB_CL2)
mask |= LANE_ADP_CS_0_CL2_SUPPORT;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_0, 1);
if (ret)
return false;
return !!(val & mask);
}
static int tb_port_clx_set(struct tb_port *port, unsigned int clx, bool enable)
{
u32 phy, mask = 0;
int ret;
if (clx & TB_CL0S)
mask |= LANE_ADP_CS_1_CL0S_ENABLE;
if (clx & TB_CL1)
mask |= LANE_ADP_CS_1_CL1_ENABLE;
if (clx & TB_CL2)
mask |= LANE_ADP_CS_1_CL2_ENABLE;
if (!mask)
return -EOPNOTSUPP;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (enable)
phy |= mask;
else
phy &= ~mask;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_clx_disable(struct tb_port *port, unsigned int clx)
{
return tb_port_clx_set(port, clx, false);
}
static int tb_port_clx_enable(struct tb_port *port, unsigned int clx)
{
return tb_port_clx_set(port, clx, true);
}
static int tb_port_clx(struct tb_port *port)
{
u32 val;
int ret;
if (!tb_port_clx_supported(port, TB_CL0S | TB_CL1 | TB_CL2))
return 0;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (val & LANE_ADP_CS_1_CL0S_ENABLE)
ret |= TB_CL0S;
if (val & LANE_ADP_CS_1_CL1_ENABLE)
ret |= TB_CL1;
if (val & LANE_ADP_CS_1_CL2_ENABLE)
ret |= TB_CL2;
return ret;
}
/**
* tb_port_clx_is_enabled() - Is the given CL state enabled
* @port: USB4 port to check
* @clx: Mask of CL states to check
*
* Returns true if any of the given CL states is enabled for @port.
*/
bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
{
return !!(tb_port_clx(port) & clx);
}
/**
* tb_switch_clx_init() - Initialize router CL states
* @sw: Router
*
* Can be called for any router. Initializes the current CL state by
* reading it from the hardware.
*
* Returns %0 in case of success and negative errno in case of failure.
*/
int tb_switch_clx_init(struct tb_switch *sw)
{
struct tb_port *up, *down;
unsigned int clx, tmp;
if (tb_switch_is_icm(sw))
return 0;
if (!tb_route(sw))
return 0;
if (!tb_switch_clx_is_supported(sw))
return 0;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
clx = tb_port_clx(up);
tmp = tb_port_clx(down);
if (clx != tmp)
tb_sw_warn(sw, "CLx: inconsistent configuration %#x != %#x\n",
clx, tmp);
tb_sw_dbg(sw, "CLx: current mode: %s\n", clx_name(clx));
sw->clx = clx;
return 0;
}
static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
struct tb_port *up, *down;
int ret;
if (!tb_route(sw))
return 0;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
ret = tb_port_pm_secondary_enable(up);
if (ret)
return ret;
return tb_port_pm_secondary_disable(down);
}
static int tb_switch_mask_clx_objections(struct tb_switch *sw)
{
int up_port = sw->config.upstream_port_number;
u32 offset, val[2], mask_obj, unmask_obj;
int ret, i;
/* Of the pre-USB4 devices, only Titan Ridge supports CLx states */
if (!tb_switch_is_titan_ridge(sw))
return 0;
if (!tb_route(sw))
return 0;
/*
* In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
* Port A consists of lane adapters 1,2 and
* Port B consists of lane adapters 3,4
* If upstream port is A, (lanes are 1,2), we mask objections from
* port B (lanes 3,4) and unmask objections from Port A and vice-versa.
*/
if (up_port == 1) {
mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
offset = TB_LOW_PWR_C1_CL1;
} else {
mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
offset = TB_LOW_PWR_C3_CL1;
}
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->cap_lp + offset, ARRAY_SIZE(val));
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(val); i++) {
val[i] |= mask_obj;
val[i] &= ~unmask_obj;
}
return tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->cap_lp + offset, ARRAY_SIZE(val));
}
/**
* tb_switch_clx_is_supported() - Is CLx supported on this type of router
* @sw: The router to check CLx support for
*/
bool tb_switch_clx_is_supported(const struct tb_switch *sw)
{
if (!clx_enabled)
return false;
if (sw->quirks & QUIRK_NO_CLX)
return false;
/*
* CLx is not enabled and validated on Intel USB4 platforms
* before Alder Lake.
*/
if (tb_switch_is_tiger_lake(sw))
return false;
return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
static bool validate_mask(unsigned int clx)
{
/* Previous states need to be enabled */
if (clx & TB_CL1)
return (clx & TB_CL0S) == TB_CL0S;
return true;
}
/**
* tb_switch_clx_enable() - Enable CLx on upstream port of specified router
* @sw: Router to enable CLx for
* @clx: The CLx state to enable
*
* CLx is enabled only if both sides of the link support CLx, the link is
* not configured as two single-lane links, and the link is not an
* inter-domain link. The complete set of conditions is described in CM
* Guide 1.0 section 8.1.
*
* Returns %0 on success or an error code on failure.
*/
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
{
bool up_clx_support, down_clx_support;
struct tb_switch *parent_sw;
struct tb_port *up, *down;
int ret;
if (!clx || sw->clx == clx)
return 0;
if (!validate_mask(clx))
return -EINVAL;
parent_sw = tb_switch_parent(sw);
if (!parent_sw)
return 0;
if (!tb_switch_clx_is_supported(parent_sw) ||
!tb_switch_clx_is_supported(sw))
return 0;
/* Only support CL2 for v2 routers */
if ((clx & TB_CL2) &&
(usb4_switch_version(parent_sw) < 2 ||
usb4_switch_version(sw) < 2))
return -EOPNOTSUPP;
ret = tb_switch_pm_secondary_resolve(sw);
if (ret)
return ret;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
up_clx_support = tb_port_clx_supported(up, clx);
down_clx_support = tb_port_clx_supported(down, clx);
tb_port_dbg(up, "CLx: %s %ssupported\n", clx_name(clx),
up_clx_support ? "" : "not ");
tb_port_dbg(down, "CLx: %s %ssupported\n", clx_name(clx),
down_clx_support ? "" : "not ");
if (!up_clx_support || !down_clx_support)
return -EOPNOTSUPP;
ret = tb_port_clx_enable(up, clx);
if (ret)
return ret;
ret = tb_port_clx_enable(down, clx);
if (ret) {
tb_port_clx_disable(up, clx);
return ret;
}
ret = tb_switch_mask_clx_objections(sw);
if (ret) {
tb_port_clx_disable(up, clx);
tb_port_clx_disable(down, clx);
return ret;
}
sw->clx |= clx;
tb_sw_dbg(sw, "CLx: %s enabled\n", clx_name(clx));
return 0;
}
/**
* tb_switch_clx_disable() - Disable CLx on upstream port of specified router
* @sw: Router to disable CLx for
*
* Disables all CL states of the given router. Can be called on any
* router; if the states were not already enabled it does nothing.
*
* Returns the CL states that were disabled or negative errno in case of
* failure.
*/
int tb_switch_clx_disable(struct tb_switch *sw)
{
unsigned int clx = sw->clx;
struct tb_port *up, *down;
int ret;
if (!tb_switch_clx_is_supported(sw))
return 0;
if (!clx)
return 0;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
ret = tb_port_clx_disable(up, clx);
if (ret)
return ret;
ret = tb_port_clx_disable(down, clx);
if (ret)
return ret;
sw->clx = 0;
tb_sw_dbg(sw, "CLx: %s disabled\n", clx_name(clx));
return clx;
}
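A minimal caller sketch (hypothetical, not code from this commit) of the pattern this pair of functions is designed for: tb_switch_clx_disable() returns the CL states that were active so they can be handed back to tb_switch_clx_enable() afterwards. Since tb_switch_clx_enable() treats a zero mask as a no-op, the restore is safe even when nothing was enabled. The debugfs lane margining change below uses exactly this pattern.

static int example_run_without_clx(struct tb_switch *sw)
{
	int ret, clx;

	ret = tb_switch_clx_disable(sw);
	if (ret < 0)
		return ret;
	clx = ret;	/* CL states that were enabled before */

	/* ... perform the CLx-sensitive operation here ... */

	/* Restore whatever was enabled; no-op when clx == 0 */
	return tb_switch_clx_enable(sw, clx);
}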
......@@ -409,6 +409,13 @@ static int tb_async_error(const struct ctl_pkg *pkg)
case TB_CFG_ERROR_HEC_ERROR_DETECTED:
case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
case TB_CFG_ERROR_DP_BW:
case TB_CFG_ERROR_ROP_CMPLT:
case TB_CFG_ERROR_POP_CMPLT:
case TB_CFG_ERROR_PCIE_WAKE:
case TB_CFG_ERROR_DP_CON_CHANGE:
case TB_CFG_ERROR_DPTX_DISCOVERY:
case TB_CFG_ERROR_LINK_RECOVERY:
case TB_CFG_ERROR_ASYM_LINK:
return true;
default:
......@@ -758,6 +765,27 @@ int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
case TB_CFG_ERROR_DP_BW:
name = "DP_BW";
break;
case TB_CFG_ERROR_ROP_CMPLT:
name = "router operation completion";
break;
case TB_CFG_ERROR_POP_CMPLT:
name = "port operation completion";
break;
case TB_CFG_ERROR_PCIE_WAKE:
name = "PCIe wake";
break;
case TB_CFG_ERROR_DP_CON_CHANGE:
name = "DP connector change";
break;
case TB_CFG_ERROR_DPTX_DISCOVERY:
name = "DPTX discovery";
break;
case TB_CFG_ERROR_LINK_RECOVERY:
name = "link recovery";
break;
case TB_CFG_ERROR_ASYM_LINK:
name = "asymmetric link";
break;
default:
name = "unknown";
break;
......
......@@ -14,12 +14,15 @@
#include "tb.h"
#include "sb_regs.h"
#define PORT_CAP_PCIE_LEN 1
#define PORT_CAP_V1_PCIE_LEN 1
#define PORT_CAP_V2_PCIE_LEN 2
#define PORT_CAP_POWER_LEN 2
#define PORT_CAP_LANE_LEN 3
#define PORT_CAP_USB3_LEN 5
#define PORT_CAP_DP_LEN 8
#define PORT_CAP_TMU_LEN 8
#define PORT_CAP_DP_V1_LEN 9
#define PORT_CAP_DP_V2_LEN 14
#define PORT_CAP_TMU_V1_LEN 8
#define PORT_CAP_TMU_V2_LEN 10
#define PORT_CAP_BASIC_LEN 9
#define PORT_CAP_USB4_LEN 20
......@@ -553,8 +556,9 @@ static int margining_run_write(void *data, u64 val)
struct usb4_port *usb4 = port->usb4;
struct tb_switch *sw = port->sw;
struct tb_margining *margining;
struct tb_switch *down_sw;
struct tb *tb = sw->tb;
int ret;
int ret, clx;
if (val != 1)
return -EINVAL;
......@@ -566,16 +570,25 @@ static int margining_run_write(void *data, u64 val)
goto out_rpm_put;
}
if (tb_is_upstream_port(port))
down_sw = sw;
else if (port->remote)
down_sw = port->remote->sw;
else
down_sw = NULL;
if (down_sw) {
/*
* CL states may interfere with lane margining so let the user know
* and bail out.
* CL states may interfere with lane margining so
* disable them temporarily now.
*/
if (tb_port_is_clx_enabled(port, TB_CL1 | TB_CL2)) {
tb_port_warn(port,
"CL states are enabled, Disable them with clx=0 and re-connect\n");
ret = -EINVAL;
ret = tb_switch_clx_disable(down_sw);
if (ret < 0) {
tb_sw_warn(down_sw, "failed to disable CL states\n");
goto out_unlock;
}
clx = ret;
}
margining = usb4->margining;
......@@ -586,7 +599,7 @@ static int margining_run_write(void *data, u64 val)
margining->right_high,
USB4_MARGIN_SW_COUNTER_CLEAR);
if (ret)
goto out_unlock;
goto out_clx;
ret = usb4_port_sw_margin_errors(port, &margining->results[0]);
} else {
......@@ -600,6 +613,9 @@ static int margining_run_write(void *data, u64 val)
margining->right_high, margining->results);
}
out_clx:
if (down_sw)
tb_switch_clx_enable(down_sw, clx);
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
......@@ -1148,7 +1164,10 @@ static void port_cap_show(struct tb_port *port, struct seq_file *s,
break;
case TB_PORT_CAP_TIME1:
length = PORT_CAP_TMU_LEN;
if (usb4_switch_version(port->sw) < 2)
length = PORT_CAP_TMU_V1_LEN;
else
length = PORT_CAP_TMU_V2_LEN;
break;
case TB_PORT_CAP_POWER:
......@@ -1157,12 +1176,17 @@ static void port_cap_show(struct tb_port *port, struct seq_file *s,
case TB_PORT_CAP_ADAP:
if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
length = PORT_CAP_PCIE_LEN;
} else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
if (usb4_dp_port_bw_mode_supported(port))
length = PORT_CAP_DP_LEN + 1;
if (usb4_switch_version(port->sw) < 2)
length = PORT_CAP_V1_PCIE_LEN;
else
length = PORT_CAP_V2_PCIE_LEN;
} else if (tb_port_is_dpin(port)) {
if (usb4_switch_version(port->sw) < 2)
length = PORT_CAP_DP_V1_LEN;
else
length = PORT_CAP_DP_LEN;
length = PORT_CAP_DP_V2_LEN;
} else if (tb_port_is_dpout(port)) {
length = PORT_CAP_DP_V1_LEN;
} else if (tb_port_is_usb3_down(port) ||
tb_port_is_usb3_up(port)) {
length = PORT_CAP_USB3_LEN;
......
......@@ -192,9 +192,9 @@ static int dma_test_start_rings(struct dma_test *dt)
}
ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
dt->tx_ring ? dt->tx_ring->hop : 0,
dt->tx_ring ? dt->tx_ring->hop : -1,
dt->rx_hopid,
dt->rx_ring ? dt->rx_ring->hop : 0);
dt->rx_ring ? dt->rx_ring->hop : -1);
if (ret) {
dma_test_free_rings(dt);
return ret;
......@@ -218,9 +218,9 @@ static void dma_test_stop_rings(struct dma_test *dt)
tb_ring_stop(dt->tx_ring);
ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
dt->tx_ring ? dt->tx_ring->hop : 0,
dt->tx_ring ? dt->tx_ring->hop : -1,
dt->rx_hopid,
dt->rx_ring ? dt->rx_ring->hop : 0);
dt->rx_ring ? dt->rx_ring->hop : -1);
if (ret)
dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");
......@@ -412,6 +412,7 @@ static void speed_get(const struct dma_test *dt, u64 *val)
static int speed_validate(u64 val)
{
switch (val) {
case 40:
case 20:
case 10:
case 0:
......@@ -489,8 +490,11 @@ static void dma_test_check_errors(struct dma_test *dt, int ret)
if (!dt->error_code) {
if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
dt->error_code = DMA_TEST_SPEED_ERROR;
} else if (dt->link_width &&
dt->xd->link_width != dt->link_width) {
} else if (dt->link_width) {
const struct tb_xdomain *xd = dt->xd;
if ((dt->link_width == 1 && xd->link_width != TB_LINK_WIDTH_SINGLE) ||
(dt->link_width == 2 && xd->link_width < TB_LINK_WIDTH_DUAL))
dt->error_code = DMA_TEST_WIDTH_ERROR;
} else if (dt->packets_to_send != dt->packets_sent ||
dt->packets_to_receive != dt->packets_received ||
......@@ -756,5 +760,5 @@ module_exit(dma_test_exit);
MODULE_AUTHOR("Isaac Hazan <isaac.hazan@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("DMA traffic test driver");
MODULE_DESCRIPTION("Thunderbolt/USB4 DMA traffic test driver");
MODULE_LICENSE("GPL v2");
......@@ -605,9 +605,8 @@ static int usb4_drom_parse(struct tb_switch *sw)
crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
if (crc != header->data_crc32) {
tb_sw_warn(sw,
"DROM data CRC32 mismatch (expected: %#x, got: %#x), aborting\n",
"DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n",
header->data_crc32, crc);
return -EINVAL;
}
return tb_drom_parse_entries(sw, USB4_DROM_HEADER_SIZE);
......
......@@ -644,13 +644,14 @@ static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
return ret;
}
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
u64 route, u8 connection_id, u8 connection_key,
u8 link, u8 depth, bool boot)
static void update_switch(struct tb_switch *sw, u64 route, u8 connection_id,
u8 connection_key, u8 link, u8 depth, bool boot)
{
struct tb_switch *parent_sw = tb_switch_parent(sw);
/* Disconnect from parent */
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
/* Re-connect via updated port*/
tb_switch_downstream_port(sw)->remote = NULL;
/* Re-connect via updated port */
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
/* Update with the new addressing information */
......@@ -671,10 +672,7 @@ static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
static void remove_switch(struct tb_switch *sw)
{
struct tb_switch *parent_sw;
parent_sw = tb_to_switch(sw->dev.parent);
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
tb_switch_downstream_port(sw)->remote = NULL;
tb_switch_remove(sw);
}
......@@ -755,7 +753,6 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
if (sw) {
u8 phy_port, sw_phy_port;
parent_sw = tb_to_switch(sw->dev.parent);
sw_phy_port = tb_phy_port_from_link(sw->link);
phy_port = tb_phy_port_from_link(link);
......@@ -785,7 +782,7 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
route = tb_route(sw);
}
update_switch(parent_sw, sw, route, pkg->connection_id,
update_switch(sw, route, pkg->connection_id,
pkg->connection_key, link, depth, boot);
tb_switch_put(sw);
return;
......@@ -853,7 +850,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
sw->security_level = security_level;
sw->boot = boot;
sw->link_speed = speed_gen3 ? 20 : 10;
sw->link_width = dual_lane ? 2 : 1;
sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL :
TB_LINK_WIDTH_SINGLE;
sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));
if (add_switch(parent_sw, sw))
......@@ -1236,9 +1234,8 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
if (sw) {
/* Update the switch if it is still in the same place */
if (tb_route(sw) == route && !!sw->authorized == authorized) {
parent_sw = tb_to_switch(sw->dev.parent);
update_switch(parent_sw, sw, route, pkg->connection_id,
0, 0, 0, boot);
update_switch(sw, route, pkg->connection_id, 0, 0, 0,
boot);
tb_switch_put(sw);
return;
}
......@@ -1276,7 +1273,8 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
sw->security_level = security_level;
sw->boot = boot;
sw->link_speed = speed_gen3 ? 20 : 10;
sw->link_width = dual_lane ? 2 : 1;
sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL :
TB_LINK_WIDTH_SINGLE;
sw->rpm = force_rtd3;
if (!sw->rpm)
sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
......
......@@ -46,6 +46,10 @@
#define QUIRK_AUTO_CLEAR_INT BIT(0)
#define QUIRK_E2E BIT(1)
static bool host_reset = true;
module_param(host_reset, bool, 0444);
MODULE_PARM_DESC(host_reset, "reset USB4 v2 host router (default: true)");
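Like clx above, host_reset is a 0444 parameter of the thunderbolt module and can only be set at load time. As a usage note (not part of this commit), skipping the host router reset from the kernel command line looks like:

	thunderbolt.host_reset=0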
static int ring_interrupt_index(const struct tb_ring *ring)
{
int bit = ring->hop;
......@@ -56,9 +60,14 @@ static int ring_interrupt_index(const struct tb_ring *ring)
static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
{
if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
return;
if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
u32 val;
val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
} else {
iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
}
}
static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
......@@ -1212,6 +1221,37 @@ static void nhi_check_iommu(struct tb_nhi *nhi)
str_enabled_disabled(port_ok));
}
static void nhi_reset(struct tb_nhi *nhi)
{
ktime_t timeout;
u32 val;
val = ioread32(nhi->iobase + REG_CAPS);
/* Reset only v2 and later routers */
if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2)
return;
if (!host_reset) {
dev_dbg(&nhi->pdev->dev, "skipping host router reset\n");
return;
}
iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
msleep(100);
timeout = ktime_add_ms(ktime_get(), 500);
do {
val = ioread32(nhi->iobase + REG_RESET);
if (!(val & REG_RESET_HRR)) {
dev_warn(&nhi->pdev->dev, "host router reset successful\n");
return;
}
usleep_range(10, 20);
} while (ktime_before(ktime_get(), timeout));
dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
}
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
......@@ -1312,7 +1352,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
/* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff;
dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
......@@ -1325,6 +1365,8 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
nhi_reset(nhi);
res = nhi_init_msi(nhi);
if (res)
return dev_err_probe(dev, res, "cannot enable MSI, aborting\n");
......@@ -1475,6 +1517,8 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
......@@ -1483,6 +1527,7 @@ static struct pci_device_id nhi_ids[] = {
};
MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_DESCRIPTION("Thunderbolt/USB4 core driver");
MODULE_LICENSE("GPL");
static struct pci_driver nhi_driver = {
......
......@@ -75,6 +75,10 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
#define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e
#define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d
#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI 0x5781
#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI 0x5784
#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE 0x5786
#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE 0x57a4
#define PCI_DEVICE_ID_INTEL_MTL_M_NHI0 0x7eb2
#define PCI_DEVICE_ID_INTEL_MTL_P_NHI0 0x7ec2
#define PCI_DEVICE_ID_INTEL_MTL_P_NHI1 0x7ec3
......
......@@ -37,7 +37,7 @@ struct ring_desc {
/* NHI registers in bar 0 */
/*
* 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
* 16 bytes per entry, one entry for every hop (REG_CAPS)
* 00: physical pointer to an array of struct ring_desc
* 08: ring tail (set by NHI)
* 10: ring head (index of first non posted descriptor)
......@@ -46,7 +46,7 @@ struct ring_desc {
#define REG_TX_RING_BASE 0x00000
/*
* 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
* 16 bytes per entry, one entry for every hop (REG_CAPS)
* 00: physical pointer to an array of struct ring_desc
* 08: ring head (index of first not posted descriptor)
* 10: ring tail (set by NHI)
......@@ -56,7 +56,7 @@ struct ring_desc {
#define REG_RX_RING_BASE 0x08000
/*
* 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
* 32 bytes per entry, one entry for every hop (REG_CAPS)
* 00: enum_ring_flags
* 04: isoch time stamp ?? (write 0)
* ..: unknown
......@@ -64,7 +64,7 @@ struct ring_desc {
#define REG_TX_OPTIONS_BASE 0x19800
/*
* 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
* 32 bytes per entry, one entry for every hop (REG_CAPS)
* 00: enum ring_flags
* If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to
* the corresponding TX hop id.
......@@ -77,7 +77,7 @@ struct ring_desc {
/*
* three bitfields: tx, rx, rx overflow
* Every bitfield contains one bit for every hop (REG_HOP_COUNT).
* Every bitfield contains one bit for every hop (REG_CAPS).
* New interrupts are fired only after ALL registers have been
* read (even those containing only disabled rings).
*/
......@@ -87,7 +87,7 @@ struct ring_desc {
/*
* two bitfields: rx, tx
* Both bitfields contain one bit for every hop (REG_HOP_COUNT). To
* Both bitfields contain one bit for every hop (REG_CAPS). To
* enable/disable interrupts set/clear the corresponding bits.
*/
#define REG_RING_INTERRUPT_BASE 0x38200
......@@ -104,12 +104,17 @@ struct ring_desc {
#define REG_INT_VEC_ALLOC_REGS (32 / REG_INT_VEC_ALLOC_BITS)
/* The last 11 bits contain the number of hops supported by the NHI port. */
#define REG_HOP_COUNT 0x39640
#define REG_CAPS 0x39640
#define REG_CAPS_VERSION_MASK GENMASK(23, 16)
#define REG_CAPS_VERSION_2 0x40
#define REG_DMA_MISC 0x39864
#define REG_DMA_MISC_INT_AUTO_CLEAR BIT(2)
#define REG_DMA_MISC_DISABLE_AUTO_CLEAR BIT(17)
#define REG_RESET 0x39898
#define REG_RESET_HRR BIT(0)
#define REG_INMAIL_DATA 0x39900
#define REG_INMAIL_CMD 0x39904
......
......@@ -12,6 +12,10 @@
#include "tb.h"
#define NVM_MIN_SIZE SZ_32K
#define NVM_MAX_SIZE SZ_1M
#define NVM_DATA_DWORDS 16
/* Intel specific NVM offsets */
#define INTEL_NVM_DEVID 0x05
#define INTEL_NVM_VERSION 0x08
......
......@@ -10,6 +10,7 @@
static void quirk_force_power_link(struct tb_switch *sw)
{
sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
tb_sw_dbg(sw, "forcing power to link controller\n");
}
static void quirk_dp_credit_allocation(struct tb_switch *sw)
......@@ -74,6 +75,14 @@ static const struct tb_quirk tb_quirks[] = {
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
/*
* CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
*/
......@@ -105,6 +114,7 @@ void tb_check_quirks(struct tb_switch *sw)
if (q->device && q->device != sw->device)
continue;
tb_sw_dbg(sw, "running %ps\n", q->hook);
q->hook(sw);
}
}
......@@ -187,10 +187,34 @@ static ssize_t nvm_authenticate_show(struct device *dev,
return ret;
}
static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
int i;
tb_port_dbg(port, "reading NVM authentication status of retimers\n");
/*
* Before doing anything else, read the authentication status.
* If the retimer has it set, store it for the new retimer
* device instance.
*/
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
}
static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
int i;
/*
* When the USB4 port is online, sideband communications are
* already up.
*/
if (!usb4_port_device_is_offline(port->usb4))
return;
tb_port_dbg(port, "enabling sideband transactions\n");
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
usb4_port_retimer_set_inbound_sbtx(port, i);
}
......@@ -199,6 +223,16 @@ static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
int i;
/*
* When the USB4 port is offline we need to keep the sideband
* communications up to make it possible to communicate with
* the connected retimers.
*/
if (usb4_port_device_is_offline(port->usb4))
return;
tb_port_dbg(port, "disabling sideband transactions\n");
for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
usb4_port_retimer_unset_inbound_sbtx(port, i);
}
......@@ -229,6 +263,13 @@ static ssize_t nvm_authenticate_store(struct device *dev,
rt->auth_status = 0;
if (val) {
/*
* When NVM authentication starts the retimer is not
* accessible so calling tb_retimer_unset_inbound_sbtx()
* will fail and therefore we do not call it. The exception
* is when the validation fails or we only write the new
* NVM image without authentication.
*/
tb_retimer_set_inbound_sbtx(rt->port);
if (val == AUTHENTICATE_ONLY) {
ret = tb_retimer_nvm_authenticate(rt, true);
......@@ -249,6 +290,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
}
exit_unlock:
if (ret || val == WRITE_ONLY)
tb_retimer_unset_inbound_sbtx(rt->port);
mutex_unlock(&rt->tb->lock);
exit_rpm:
......@@ -341,12 +383,6 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
return ret;
}
if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
vendor);
return -EOPNOTSUPP;
}
/*
* Check that it supports NVM operations. If not then don't add
* the device at all.
......@@ -455,18 +491,16 @@ int tb_retimer_scan(struct tb_port *port, bool add)
return ret;
/*
* Enable sideband channel for each retimer. We can do this
* regardless of whether there is a device connected or not.
* Immediately after sending enumerate retimers read the
* authentication status of each retimer.
*/
tb_retimer_set_inbound_sbtx(port);
tb_retimer_nvm_authenticate_status(port, status);
/*
* Before doing anything else, read the authentication status.
* If the retimer has it set, store it for the new retimer
* device instance.
* Enable sideband channel for each retimer. We can do this
* regardless of whether there is a device connected or not.
*/
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
tb_retimer_set_inbound_sbtx(port);
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
/*
......
......@@ -30,6 +30,13 @@ enum tb_cfg_error {
TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
TB_CFG_ERROR_LOCK = 15,
TB_CFG_ERROR_DP_BW = 32,
TB_CFG_ERROR_ROP_CMPLT = 33,
TB_CFG_ERROR_POP_CMPLT = 34,
TB_CFG_ERROR_PCIE_WAKE = 35,
TB_CFG_ERROR_DP_CON_CHANGE = 36,
TB_CFG_ERROR_DPTX_DISCOVERY = 37,
TB_CFG_ERROR_LINK_RECOVERY = 38,
TB_CFG_ERROR_ASYM_LINK = 39,
};
/* common header */
......
......@@ -190,11 +190,14 @@ struct tb_regs_switch_header {
u32 thunderbolt_version:8;
} __packed;
/* USB4 version 1.0 */
#define USB4_VERSION_1_0 0x20
/* Used with the router thunderbolt_version */
#define USB4_VERSION_MAJOR_MASK GENMASK(7, 5)
#define ROUTER_CS_1 0x01
#define ROUTER_CS_4 0x04
/* Used with the router cmuv field */
#define ROUTER_CS_4_CMUV_V1 0x10
#define ROUTER_CS_4_CMUV_V2 0x20
#define ROUTER_CS_5 0x05
#define ROUTER_CS_5_SLP BIT(0)
#define ROUTER_CS_5_WOP BIT(1)
......@@ -249,11 +252,13 @@ enum usb4_switch_op {
#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0)
#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16)
#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16
#define TMU_RTR_CS_15 0xf
#define TMU_RTR_CS_15 0x0f
#define TMU_RTR_CS_15_FREQ_AVG_MASK GENMASK(5, 0)
#define TMU_RTR_CS_15_DELAY_AVG_MASK GENMASK(11, 6)
#define TMU_RTR_CS_15_OFFSET_AVG_MASK GENMASK(17, 12)
#define TMU_RTR_CS_15_ERROR_AVG_MASK GENMASK(23, 18)
#define TMU_RTR_CS_18 0x12
#define TMU_RTR_CS_18_DELTA_AVG_CONST_MASK GENMASK(23, 16)
#define TMU_RTR_CS_22 0x16
#define TMU_RTR_CS_24 0x18
#define TMU_RTR_CS_25 0x19
......@@ -319,6 +324,14 @@ struct tb_regs_port_header {
#define TMU_ADP_CS_3_UDM BIT(29)
#define TMU_ADP_CS_6 0x06
#define TMU_ADP_CS_6_DTS BIT(1)
#define TMU_ADP_CS_8 0x08
#define TMU_ADP_CS_8_REPL_TIMEOUT_MASK GENMASK(14, 0)
#define TMU_ADP_CS_8_EUDM BIT(15)
#define TMU_ADP_CS_8_REPL_THRESHOLD_MASK GENMASK(25, 16)
#define TMU_ADP_CS_9 0x09
#define TMU_ADP_CS_9_REPL_N_MASK GENMASK(7, 0)
#define TMU_ADP_CS_9_DIRSWITCH_N_MASK GENMASK(15, 8)
#define TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK GENMASK(31, 16)
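The new TMU_ADP_CS_8/9 fields back the enhanced uni-directional TMU mode mentioned in the merge summary. A rough sketch (an illustration only; the real programming lives in tmu.c) of turning the mode on for an adapter of a USB4 v2 router:

	u32 val;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	val |= TMU_ADP_CS_8_EUDM;	/* enhanced uni-directional mode */
	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_8, 1);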
/* Lane adapter registers */
#define LANE_ADP_CS_0 0x00
......@@ -346,6 +359,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16
#define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8
#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4
#define LANE_ADP_CS_1_CURRENT_SPEED_GEN4 0x2
#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
#define LANE_ADP_CS_1_PMS BIT(30)
......@@ -436,6 +450,9 @@ struct tb_regs_port_header {
#define DP_COMMON_CAP_1_LANE 0x0
#define DP_COMMON_CAP_2_LANES 0x1
#define DP_COMMON_CAP_4_LANES 0x2
#define DP_COMMON_CAP_UHBR10 BIT(17)
#define DP_COMMON_CAP_UHBR20 BIT(18)
#define DP_COMMON_CAP_UHBR13_5 BIT(19)
#define DP_COMMON_CAP_LTTPR_NS BIT(27)
#define DP_COMMON_CAP_BW_MODE BIT(28)
#define DP_COMMON_CAP_DPRX_DONE BIT(31)
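The UHBR bits advertise the DP 2.x link rates used by the new DisplayPort 2.x tunneling support. A small decoding sketch (a hypothetical helper, for illustration only) mapping them to per-lane rates in Mb/s:

static unsigned int example_max_uhbr_rate(u32 dp_common_cap)
{
	if (dp_common_cap & DP_COMMON_CAP_UHBR20)
		return 20000;	/* UHBR20: 20 Gb/s per lane */
	if (dp_common_cap & DP_COMMON_CAP_UHBR13_5)
		return 13500;	/* UHBR13.5: 13.5 Gb/s per lane */
	if (dp_common_cap & DP_COMMON_CAP_UHBR10)
		return 10000;	/* UHBR10: 10 Gb/s per lane */
	return 0;		/* no UHBR rates supported */
}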
......@@ -447,6 +464,8 @@ struct tb_regs_port_header {
/* PCIe adapter registers */
#define ADP_PCIE_CS_0 0x00
#define ADP_PCIE_CS_0_PE BIT(31)
#define ADP_PCIE_CS_1 0x01
#define ADP_PCIE_CS_1_EE BIT(0)
/* USB adapter registers */
#define ADP_USB3_CS_0 0x00
......
......@@ -170,6 +170,23 @@ static struct tb_switch *alloc_host_usb4(struct kunit *test)
return sw;
}
static struct tb_switch *alloc_host_br(struct kunit *test)
{
struct tb_switch *sw;
sw = alloc_host_usb4(test);
if (!sw)
return NULL;
sw->ports[10].config.type = TB_TYPE_DP_HDMI_IN;
sw->ports[10].config.max_in_hop_id = 9;
sw->ports[10].config.max_out_hop_id = 9;
sw->ports[10].cap_adap = -1;
sw->ports[10].disabled = false;
return sw;
}
static struct tb_switch *alloc_dev_default(struct kunit *test,
struct tb_switch *parent,
u64 route, bool bonded)
......@@ -1583,6 +1600,71 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_3dp(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
struct tb_port *in1, *in2, *in3, *out1, *out2, *out3;
struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
/*
* Create 3 DP tunnels from Host to Devices #2, #5 and #4.
*
* [Host]
* 3 |
* 1 |
* [Device #1]
* 3 / | 5 \ 7
* 1 / | \ 1
* [Device #2] | [Device #4]
* | 1
* [Device #3]
* | 5
* | 1
* [Device #5]
*/
host = alloc_host_br(test);
dev1 = alloc_dev_default(test, host, 0x3, true);
dev2 = alloc_dev_default(test, dev1, 0x303, true);
dev3 = alloc_dev_default(test, dev1, 0x503, true);
dev4 = alloc_dev_default(test, dev1, 0x703, true);
dev5 = alloc_dev_default(test, dev3, 0x50503, true);
in1 = &host->ports[5];
in2 = &host->ports[6];
in3 = &host->ports[10];
out1 = &dev2->ports[13];
out2 = &dev5->ports[13];
out3 = &dev4->ports[14];
tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, out1);
KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);
tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, out2);
KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);
tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel3 != NULL);
KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
KUNIT_EXPECT_PTR_EQ(test, tunnel3->dst_port, out3);
KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);
tb_tunnel_free(tunnel3);
tb_tunnel_free(tunnel2);
tb_tunnel_free(tunnel1);
}
static void tb_test_tunnel_usb3(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2;
......@@ -2790,6 +2872,7 @@ static struct kunit_case tb_test_cases[] = {
KUNIT_CASE(tb_test_tunnel_dp_chain),
KUNIT_CASE(tb_test_tunnel_dp_tree),
KUNIT_CASE(tb_test_tunnel_dp_max_length),
KUNIT_CASE(tb_test_tunnel_3dp),
KUNIT_CASE(tb_test_tunnel_port_on_path),
KUNIT_CASE(tb_test_tunnel_usb3),
KUNIT_CASE(tb_test_tunnel_dma),
......
......@@ -15,6 +15,7 @@
#include "tb.h"
#define USB4_DATA_RETRIES 3
#define USB4_DATA_DWORDS 16
enum usb4_sb_target {
USB4_SB_TARGET_ROUTER,
......@@ -112,7 +113,7 @@ static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
{
const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS)
if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
return -EINVAL;
/*
......@@ -231,11 +232,14 @@ static bool link_is_usb4(struct tb_port *port)
* is not available for some reason (for example, there is a Thunderbolt 3
* switch upstream) then the internal xHCI controller is enabled
* instead.
*
* This does not set the configuration valid bit of the router. To do
* that call usb4_switch_configuration_valid().
*/
int usb4_switch_setup(struct tb_switch *sw)
{
struct tb_port *downstream_port;
struct tb_switch *parent;
struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *down;
bool tbt3, xhci;
u32 val = 0;
int ret;
......@@ -249,9 +253,8 @@ int usb4_switch_setup(struct tb_switch *sw)
if (ret)
return ret;
parent = tb_switch_parent(sw);
downstream_port = tb_port_at(tb_route(sw), parent);
sw->link_usb4 = link_is_usb4(downstream_port);
down = tb_switch_downstream_port(sw);
sw->link_usb4 = link_is_usb4(down);
tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");
xhci = val & ROUTER_CS_6_HCI;
......@@ -288,7 +291,33 @@ int usb4_switch_setup(struct tb_switch *sw)
/* TBT3 supported by the CM */
val |= ROUTER_CS_5_C3S;
/* Tunneling configuration is ready now */
return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}
/**
* usb4_switch_configuration_valid() - Set tunneling configuration to be valid
* @sw: USB4 router
*
* Sets the configuration valid bit for the router. Must be called before
* any tunnels can be set through the router and after
* usb4_switch_setup() has been called. Can be called for both host and
* device routers (does nothing for the latter).
*
* Returns %0 on success and negative errno otherwise.
*/
int usb4_switch_configuration_valid(struct tb_switch *sw)
{
u32 val;
int ret;
if (!tb_route(sw))
return 0;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
return ret;
val |= ROUTER_CS_5_CV;
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
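The split between usb4_switch_setup() and the new usb4_switch_configuration_valid() lets the connection manager program the rest of the router before flipping the valid bit. A rough ordering sketch (an illustration based on the kernel-doc above, not code from this commit):

	ret = usb4_switch_setup(sw);
	if (ret)
		return ret;
	/* ... program TMU, CL states and other router configuration ... */
	ret = usb4_switch_configuration_valid(sw);
	if (ret)
		return ret;
	/* only now may tunnels be set up through the router */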
......@@ -703,7 +732,7 @@ int usb4_switch_credits_init(struct tb_switch *sw)
int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
int ret, length, i, nports;
const struct tb_port *port;
u32 data[NVM_DATA_DWORDS];
u32 data[USB4_DATA_DWORDS];
u32 metadata = 0;
u8 status = 0;
......@@ -1199,7 +1228,7 @@ static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
if (dwords > NVM_DATA_DWORDS)
if (dwords > USB4_DATA_DWORDS)
return -EINVAL;
return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
......@@ -1209,7 +1238,7 @@ static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
static int usb4_port_write_data(struct tb_port *port, const void *data,
size_t dwords)
{
if (dwords > NVM_DATA_DWORDS)
if (dwords > USB4_DATA_DWORDS)
return -EINVAL;
return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
......@@ -1845,7 +1874,7 @@ static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
int ret;
metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
if (dwords < NVM_DATA_DWORDS)
if (dwords < USB4_DATA_DWORDS)
metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
......@@ -2265,13 +2294,14 @@ int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
}
/**
* usb4_dp_port_bw_mode_supported() - Is the bandwidth allocation mode supported
* usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode
* supported
* @port: DP IN adapter to check
*
* Can be called to any DP IN adapter. Returns true if the adapter
* supports USB4 bandwidth allocation mode, false otherwise.
*/
bool usb4_dp_port_bw_mode_supported(struct tb_port *port)
bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port)
{
int ret;
u32 val;
......@@ -2288,13 +2318,14 @@ bool usb4_dp_port_bw_mode_supported(struct tb_port *port)
}
/**
* usb4_dp_port_bw_mode_enabled() - Is the bandwidth allocation mode enabled
* usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode
* enabled
* @port: DP IN adapter to check
*
* Can be called to any DP IN adapter. Returns true if the bandwidth
* allocation mode has been enabled, false otherwise.
*/
bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port)
{
int ret;
u32 val;
......@@ -2311,7 +2342,8 @@ bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
}
/**
* usb4_dp_port_set_cm_bw_mode_supported() - Set/clear CM support for bandwidth allocation mode
* usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for
* bandwidth allocation mode
* @port: DP IN adapter
* @supported: Does the CM support bandwidth allocation mode
*
......@@ -2320,7 +2352,8 @@ bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
* otherwise. Specifically returns %-EOPNOTSUPP if the passed in adapter
* does not support this.
*/
int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported)
int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
bool supported)
{
u32 val;
int ret;
......@@ -2594,7 +2627,7 @@ int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
}
/**
* usb4_dp_port_set_estimated_bw() - Set estimated bandwidth
* usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth
* @port: DP IN adapter
* @bw: Estimated bandwidth in Mb/s.
*
......@@ -2604,7 +2637,7 @@ int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
* and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
* the adapter does not support this.
*/
int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw)
int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
{
u32 val, granularity;
int ret;
......@@ -2630,14 +2663,14 @@ int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw)
}
/**
* usb4_dp_port_allocated_bw() - Return allocated bandwidth
* usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth
* @port: DP IN adapter
*
* Reads and returns allocated bandwidth for @port in Mb/s (taking into
* account the programmed granularity). Returns negative errno in case
* of error.
*/
int usb4_dp_port_allocated_bw(struct tb_port *port)
int usb4_dp_port_allocated_bandwidth(struct tb_port *port)
{
u32 val, granularity;
int ret;
......@@ -2723,7 +2756,7 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
}
/**
* usb4_dp_port_allocate_bw() - Set allocated bandwidth
* usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth
* @port: DP IN adapter
* @bw: New allocated bandwidth in Mb/s
*
......@@ -2731,7 +2764,7 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
* driver). Takes into account the programmed granularity. Returns %0 on
* success and negative errno in case of error.
*/
int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
{
u32 val, granularity;
int ret;
......@@ -2765,7 +2798,7 @@ int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
}
/**
* usb4_dp_port_requested_bw() - Read requested bandwidth
* usb4_dp_port_requested_bandwidth() - Read requested bandwidth
* @port: DP IN adapter
*
* Reads the DPCD (graphics driver) requested bandwidth and returns it
......@@ -2774,7 +2807,7 @@ int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
* the adapter does not support bandwidth allocation mode, and %-ENODATA
* if there is no active bandwidth request from the graphics driver.
*/
int usb4_dp_port_requested_bw(struct tb_port *port)
int usb4_dp_port_requested_bandwidth(struct tb_port *port)
{
u32 val, granularity;
int ret;
......@@ -2797,3 +2830,34 @@ int usb4_dp_port_requested_bw(struct tb_port *port)
return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
}
/**
* usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation
* @port: PCIe adapter
* @enable: Enable/disable extended encapsulation
*
* Enables or disables extended encapsulation used in PCIe tunneling. Caller
* needs to make sure both adapters support this before enabling. Returns %0 on
* success and negative errno otherwise.
*/
int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable)
{
u32 val;
int ret;
if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port))
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_PCIE_CS_1, 1);
if (ret)
return ret;
if (enable)
val |= ADP_PCIE_CS_1_EE;
else
val &= ~ADP_PCIE_CS_1_EE;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_PCIE_CS_1, 1);
}
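Per the kernel-doc above, the caller must verify support and handle both ends of the tunnel. A hypothetical helper (an illustrative sketch, not code from this commit) enabling extended encapsulation on both PCIe adapters might look like:

static int example_enable_ext_encap(struct tb_port *up, struct tb_port *down)
{
	int ret;

	/* Assumed precondition: extended encapsulation is USB4 v2 only */
	if (usb4_switch_version(up->sw) < 2 ||
	    usb4_switch_version(down->sw) < 2)
		return 0;

	ret = usb4_pci_port_set_ext_encapsulation(up, true);
	if (ret)
		return ret;

	ret = usb4_pci_port_set_ext_encapsulation(down, true);
	if (ret)
		/* Keep both ends consistent on failure */
		usb4_pci_port_set_ext_encapsulation(up, false);

	return ret;
}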
......@@ -537,9 +537,8 @@ static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
struct tb_xdomain *xd, u8 sequence)
{
struct tb_switch *sw = tb_to_switch(xd->dev.parent);
struct tb_xdp_link_state_status_response res;
struct tb_port *port = tb_port_at(xd->route, sw);
struct tb_port *port = tb_xdomain_downstream_port(xd);
u32 val[2];
int ret;
......@@ -1137,7 +1136,7 @@ static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
struct tb_port *port;
int ret;
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
port = tb_xdomain_downstream_port(xd);
ret = tb_port_get_link_speed(port);
if (ret < 0)
......@@ -1251,8 +1250,7 @@ static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
unsigned int width)
{
struct tb_switch *sw = tb_to_switch(xd->dev.parent);
struct tb_port *port = tb_port_at(xd->route, sw);
struct tb_port *port = tb_xdomain_downstream_port(xd);
struct tb *tb = xd->tb;
u8 tlw, tls;
u32 val;
......@@ -1292,13 +1290,16 @@ static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
{
unsigned int width, width_mask;
struct tb_port *port;
int ret, width;
int ret;
if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
width = 1;
width = TB_LINK_WIDTH_SINGLE;
width_mask = width;
} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
width = 2;
width = TB_LINK_WIDTH_DUAL;
width_mask = width | TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX;
} else {
if (xd->state_retries-- > 0) {
dev_dbg(&xd->dev,
......@@ -1309,7 +1310,7 @@ static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
return -ETIMEDOUT;
}
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
port = tb_xdomain_downstream_port(xd);
/*
* We can't use tb_xdomain_lane_bonding_enable() here because it
......@@ -1330,15 +1331,16 @@ static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
return ret;
}
ret = tb_port_wait_for_link_width(port, width, XDOMAIN_BONDING_TIMEOUT);
ret = tb_port_wait_for_link_width(port, width_mask,
XDOMAIN_BONDING_TIMEOUT);
if (ret) {
dev_warn(&xd->dev, "error waiting for link width to become %d\n",
width);
width_mask);
return ret;
}
port->bonded = width == 2;
port->dual_link_port->bonded = width == 2;
port->bonded = width > TB_LINK_WIDTH_SINGLE;
port->dual_link_port->bonded = width > TB_LINK_WIDTH_SINGLE;
tb_port_update_credits(port);
tb_xdomain_update_link_attributes(xd);
......@@ -1425,7 +1427,7 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
if (xd->bonding_possible) {
struct tb_port *port;
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
port = tb_xdomain_downstream_port(xd);
if (!port->bonded)
tb_port_disable(port->dual_link_port);
}
......@@ -1737,16 +1739,57 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
unsigned int width;
return sysfs_emit(buf, "%u\n", xd->link_width);
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
case TB_LINK_WIDTH_ASYM_RX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
case TB_LINK_WIDTH_ASYM_TX:
width = 3;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
unsigned int width;
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
case TB_LINK_WIDTH_ASYM_TX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
case TB_LINK_WIDTH_ASYM_RX:
width = 3;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
static struct attribute *xdomain_attrs[] = {
&dev_attr_device.attr,
......@@ -1976,10 +2019,11 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
*/
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
unsigned int width_mask;
struct tb_port *port;
int ret;
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
port = tb_xdomain_downstream_port(xd);
if (!port->dual_link_port)
return -ENODEV;
......@@ -1999,7 +2043,12 @@ int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
return ret;
}
ret = tb_port_wait_for_link_width(port, 2, XDOMAIN_BONDING_TIMEOUT);
/* Any of these widths means the link is bonded */
width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
TB_LINK_WIDTH_ASYM_RX;
ret = tb_port_wait_for_link_width(port, width_mask,
XDOMAIN_BONDING_TIMEOUT);
if (ret) {
tb_port_warn(port, "failed to enable lane bonding\n");
return ret;
......@@ -2024,10 +2073,13 @@ void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
struct tb_port *port;
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
port = tb_xdomain_downstream_port(xd);
if (port->dual_link_port) {
int ret;
tb_port_lane_bonding_disable(port);
if (tb_port_wait_for_link_width(port, 1, 100) == -ETIMEDOUT)
ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_SINGLE, 100);
if (ret == -ETIMEDOUT)
tb_port_warn(port, "timeout disabling lane bonding\n");
tb_port_disable(port->dual_link_port);
tb_port_update_credits(port);
......
......@@ -171,6 +171,20 @@ struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
/**
* enum tb_link_width - Thunderbolt/USB4 link width
* @TB_LINK_WIDTH_SINGLE: Single lane link
* @TB_LINK_WIDTH_DUAL: Dual lane symmetric link
* @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 transmitters
* @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers
*/
enum tb_link_width {
TB_LINK_WIDTH_SINGLE = BIT(0),
TB_LINK_WIDTH_DUAL = BIT(1),
TB_LINK_WIDTH_ASYM_TX = BIT(2),
TB_LINK_WIDTH_ASYM_RX = BIT(3),
};
/**
* struct tb_xdomain - Cross-domain (XDomain) connection
* @dev: XDomain device
......@@ -186,7 +200,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
* @link_speed: Speed of the link in Gb/s
* @link_width: Width of the link (1 or 2)
* @link_width: Width of the downstream facing link
* @link_usb4: Downstream link is USB4
* @is_unplugged: The XDomain is unplugged
* @needs_uuid: If the XDomain does not have @remote_uuid it will be
......@@ -234,7 +248,7 @@ struct tb_xdomain {
const char *vendor_name;
const char *device_name;
unsigned int link_speed;
unsigned int link_width;
enum tb_link_width link_width;
bool link_usb4;
bool is_unplugged;
bool needs_uuid;
......