Commit 849f3542 authored by Greg Kroah-Hartman's avatar Greg Kroah-Hartman

Merge tag 'thunderbolt-for-v5.20-rc1' of...

Merge tag 'thunderbolt-for-v5.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v5.20 merge window

This includes following Thunderbolt/USB4 changes for the v5.20 merge
window:

  * Enable CL1 low power link state
  * Add support for Intel Raptor Lake Thunderbolt/USB4 controller
  * A couple of typos fixed.

All these have been in linux-next with no reported issues.

* tag 'thunderbolt-for-v5.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt:
  thunderbolt: Fix typo in comment
  thunderbolt: Add support for Intel Raptor Lake
  thunderbolt: Fix some kernel-doc comments
  thunderbolt: Change TMU mode to HiFi uni-directional once DisplayPort tunneled
  thunderbolt: Add CL1 support for USB4 and Titan Ridge routers
  thunderbolt: Change downstream router's TMU rate in both TMU uni/bidir mode
  thunderbolt: Fix typos in CLx enabling
  thunderbolt: CLx disable before system suspend only if previously enabled
  thunderbolt: Silently ignore CLx enabling in case CLx is not supported
parents ebc4969a 34b9715b
......@@ -694,7 +694,7 @@ void tb_ctl_free(struct tb_ctl *ctl)
}
/**
* tb_cfg_start() - start/resume the control channel
* tb_ctl_start() - start/resume the control channel
* @ctl: Control channel to start
*/
void tb_ctl_start(struct tb_ctl *ctl)
......@@ -710,7 +710,7 @@ void tb_ctl_start(struct tb_ctl *ctl)
}
/**
* tb_ctrl_stop() - pause the control channel
* tb_ctl_stop() - pause the control channel
* @ctl: Control channel to stop
*
* All invocations of ctl->callback will have finished after this method
......@@ -912,7 +912,7 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
}
/**
* tb_cfg_write() - write from buffer into config space
* tb_cfg_write_raw() - write from buffer into config space
* @ctl: Pointer to the control channel
* @buffer: Data to write
* @route: Route string of the router
......
......@@ -35,7 +35,7 @@ struct tb_cfg_result {
* If err = 1 then this is the port that send the
* error.
* If err = 0 and if this was a cfg_read/write then
* this is the the upstream port of the responding
* this is the upstream port of the responding
* switch.
* Otherwise the field is set to zero.
*/
......
......@@ -2516,6 +2516,8 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
case PCI_DEVICE_ID_INTEL_ADL_NHI0:
case PCI_DEVICE_ID_INTEL_ADL_NHI1:
case PCI_DEVICE_ID_INTEL_RPL_NHI0:
case PCI_DEVICE_ID_INTEL_RPL_NHI1:
icm->is_supported = icm_tgl_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;
......
......@@ -1410,6 +1410,10 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
......
......@@ -80,6 +80,8 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TGL_NHI1 0x9a1d
#define PCI_DEVICE_ID_INTEL_TGL_H_NHI0 0x9a1f
#define PCI_DEVICE_ID_INTEL_TGL_H_NHI1 0x9a21
#define PCI_DEVICE_ID_INTEL_RPL_NHI0 0xa73e
#define PCI_DEVICE_ID_INTEL_RPL_NHI1 0xa76d
#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
......
......@@ -3133,9 +3133,13 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
/*
* Actually only needed for Titan Ridge but for simplicity can be
* done for USB4 device too as CLx is re-enabled at resume.
* CL0s and CL1 are enabled and supported together.
*/
if (tb_switch_disable_clx(sw, TB_CL0S))
tb_sw_warn(sw, "failed to disable CLx on upstream port\n");
if (tb_switch_is_clx_enabled(sw, TB_CL1)) {
if (tb_switch_disable_clx(sw, TB_CL1))
tb_sw_warn(sw, "failed to disable %s on upstream port\n",
tb_switch_clx_name(TB_CL1));
}
err = tb_plug_events_active(sw, false);
if (err)
......@@ -3426,13 +3430,12 @@ static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
}
switch (clx) {
case TB_CL0S:
/* CL0s support requires also CL1 support */
case TB_CL1:
/* CL0s and CL1 are enabled and supported together */
mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
break;
/* For now we support only CL0s. Not CL1, CL2 */
case TB_CL1:
/* For now we support only CL0s and CL1. Not CL2 */
case TB_CL2:
default:
return false;
......@@ -3446,18 +3449,18 @@ static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
return !!(val & mask);
}
static inline bool tb_port_cl0s_supported(struct tb_port *port)
{
return tb_port_clx_supported(port, TB_CL0S);
}
static int __tb_port_cl0s_set(struct tb_port *port, bool enable)
static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
{
u32 phy, mask;
int ret;
/* To enable CL0s also required to enable CL1 */
/* CL0s and CL1 are enabled and supported together */
if (clx == TB_CL1)
mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
else
/* For now we support only CL0s and CL1. Not CL2 */
return -EOPNOTSUPP;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
......@@ -3472,20 +3475,20 @@ static int __tb_port_cl0s_set(struct tb_port *port, bool enable)
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_cl0s_disable(struct tb_port *port)
static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
{
return __tb_port_cl0s_set(port, false);
return __tb_port_clx_set(port, clx, false);
}
static int tb_port_cl0s_enable(struct tb_port *port)
static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
{
return __tb_port_cl0s_set(port, true);
return __tb_port_clx_set(port, clx, true);
}
static int tb_switch_enable_cl0s(struct tb_switch *sw)
static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
struct tb_switch *parent = tb_switch_parent(sw);
bool up_cl0s_support, down_cl0s_support;
bool up_clx_support, down_clx_support;
struct tb_port *up, *down;
int ret;
......@@ -3510,37 +3513,37 @@ static int tb_switch_enable_cl0s(struct tb_switch *sw)
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
up_cl0s_support = tb_port_cl0s_supported(up);
down_cl0s_support = tb_port_cl0s_supported(down);
up_clx_support = tb_port_clx_supported(up, clx);
down_clx_support = tb_port_clx_supported(down, clx);
tb_port_dbg(up, "CL0s %ssupported\n",
up_cl0s_support ? "" : "not ");
tb_port_dbg(down, "CL0s %ssupported\n",
down_cl0s_support ? "" : "not ");
tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx),
up_clx_support ? "" : "not ");
tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx),
down_clx_support ? "" : "not ");
if (!up_cl0s_support || !down_cl0s_support)
if (!up_clx_support || !down_clx_support)
return -EOPNOTSUPP;
ret = tb_port_cl0s_enable(up);
ret = tb_port_clx_enable(up, clx);
if (ret)
return ret;
ret = tb_port_cl0s_enable(down);
ret = tb_port_clx_enable(down, clx);
if (ret) {
tb_port_cl0s_disable(up);
tb_port_clx_disable(up, clx);
return ret;
}
ret = tb_switch_mask_clx_objections(sw);
if (ret) {
tb_port_cl0s_disable(up);
tb_port_cl0s_disable(down);
tb_port_clx_disable(up, clx);
tb_port_clx_disable(down, clx);
return ret;
}
sw->clx = TB_CL0S;
sw->clx = clx;
tb_port_dbg(up, "CL0s enabled\n");
tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx));
return 0;
}
......@@ -3554,7 +3557,7 @@ static int tb_switch_enable_cl0s(struct tb_switch *sw)
* to improve performance. CLx is enabled only if both sides of the link
* support CLx, and if both sides of the link are not configured as two
* single lane links and only if the link is not inter-domain link. The
* complete set of conditions is descibed in CM Guide 1.0 section 8.1.
* complete set of conditions is described in CM Guide 1.0 section 8.1.
*
* Return: Returns 0 on success or an error code on failure.
*/
......@@ -3573,15 +3576,16 @@ int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
return 0;
switch (clx) {
case TB_CL0S:
return tb_switch_enable_cl0s(sw);
case TB_CL1:
/* CL0s and CL1 are enabled and supported together */
return __tb_switch_enable_clx(sw, clx);
default:
return -EOPNOTSUPP;
}
}
static int tb_switch_disable_cl0s(struct tb_switch *sw)
static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
{
struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *up, *down;
......@@ -3603,17 +3607,17 @@ static int tb_switch_disable_cl0s(struct tb_switch *sw)
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
ret = tb_port_cl0s_disable(up);
ret = tb_port_clx_disable(up, clx);
if (ret)
return ret;
ret = tb_port_cl0s_disable(down);
ret = tb_port_clx_disable(down, clx);
if (ret)
return ret;
sw->clx = TB_CLX_DISABLE;
tb_port_dbg(up, "CL0s disabled\n");
tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx));
return 0;
}
......@@ -3630,8 +3634,9 @@ int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
return 0;
switch (clx) {
case TB_CL0S:
return tb_switch_disable_cl0s(sw);
case TB_CL1:
/* CL0s and CL1 are enabled and supported together */
return __tb_switch_disable_clx(sw, clx);
default:
return -EOPNOTSUPP;
......
......@@ -118,6 +118,13 @@ static void tb_switch_discover_tunnels(struct tb_switch *sw,
switch (port->config.type) {
case TB_TYPE_DP_HDMI_IN:
tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
/*
* In case of DP tunnel exists, change host router's
* 1st children TMU mode to HiFi for CL0s to work.
*/
if (tunnel)
tb_switch_enable_tmu_1st_child(tb->root_switch,
TB_SWITCH_TMU_RATE_HIFI);
break;
case TB_TYPE_PCIE_DOWN:
......@@ -215,7 +222,7 @@ static int tb_enable_tmu(struct tb_switch *sw)
int ret;
/* If it is already enabled in correct mode, don't touch it */
if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
return 0;
ret = tb_switch_tmu_disable(sw);
......@@ -575,6 +582,7 @@ static void tb_scan_port(struct tb_port *port)
struct tb_cm *tcm = tb_priv(port->sw->tb);
struct tb_port *upstream_port;
struct tb_switch *sw;
int ret;
if (tb_is_upstream_port(port))
return;
......@@ -663,11 +671,24 @@ static void tb_scan_port(struct tb_port *port)
tb_switch_lane_bonding_enable(sw);
/* Set the link configured */
tb_switch_configure_link(sw);
if (tb_switch_enable_clx(sw, TB_CL0S))
tb_sw_warn(sw, "failed to enable CLx on upstream port\n");
/*
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported.
*/
ret = tb_switch_enable_clx(sw, TB_CL1);
if (ret && ret != -EOPNOTSUPP)
tb_sw_warn(sw, "failed to enable %s on upstream port\n",
tb_switch_clx_name(TB_CL1));
tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
tb_switch_is_clx_enabled(sw));
if (tb_switch_is_clx_enabled(sw, TB_CL1))
/*
* To support highest CLx state, we set router's TMU to
* Normal-Uni mode.
*/
tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
else
/* If CLx disabled, configure router's TMU to HiFi-Bidir mode */
tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to enable TMU\n");
......@@ -965,6 +986,12 @@ static void tb_tunnel_dp(struct tb *tb)
list_add_tail(&tunnel->list, &tcm->tunnel_list);
tb_reclaim_usb3_bandwidth(tb, in, out);
/*
* In case of DP tunnel exists, change host router's 1st children
* TMU mode to HiFi for CL0s to work.
*/
tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI);
return;
err_free:
......@@ -1407,7 +1434,12 @@ static int tb_start(struct tb *tb)
return ret;
}
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI, false);
/*
* To support highest CLx state, we set host router's TMU to
* Normal mode.
*/
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
false);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
/* Full scan to discover devices added before the driver was loaded. */
......@@ -1446,19 +1478,31 @@ static int tb_suspend_noirq(struct tb *tb)
static void tb_restore_children(struct tb_switch *sw)
{
struct tb_port *port;
int ret;
/* No need to restore if the router is already unplugged */
if (sw->is_unplugged)
return;
if (tb_switch_enable_clx(sw, TB_CL0S))
tb_sw_warn(sw, "failed to re-enable CLx on upstream port\n");
/*
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx re-enabling in case CLx is not supported.
*/
ret = tb_switch_enable_clx(sw, TB_CL1);
if (ret && ret != -EOPNOTSUPP)
tb_sw_warn(sw, "failed to re-enable %s on upstream port\n",
tb_switch_clx_name(TB_CL1));
if (tb_switch_is_clx_enabled(sw, TB_CL1))
/*
* tb_switch_tmu_configure() was already called when the switch was
* added before entering system sleep or runtime suspend,
* so no need to call it again before enabling TMU.
* To support highest CLx state, we set router's TMU to
* Normal-Uni mode.
*/
tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
else
/* If CLx disabled, configure router's TMU to HiFi-Bidir mode */
tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to restore TMU configuration\n");
......
......@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/bitfield.h>
#include "tb_regs.h"
#include "ctl.h"
......@@ -111,7 +112,7 @@ struct tb_switch_tmu {
enum tb_clx {
TB_CLX_DISABLE,
TB_CL0S,
/* CL0s and CL1 are enabled and supported together */
TB_CL1,
TB_CL2,
};
......@@ -933,46 +934,49 @@ int tb_switch_tmu_enable(struct tb_switch *sw);
void tb_switch_tmu_configure(struct tb_switch *sw,
enum tb_switch_tmu_rate rate,
bool unidirectional);
void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
enum tb_switch_tmu_rate rate);
/**
* tb_switch_tmu_hifi_is_enabled() - Checks if the specified TMU mode is enabled
* tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled
* @sw: Router whose TMU mode to check
* @unidirectional: If uni-directional (bi-directional otherwise)
*
* Return true if hardware TMU configuration matches the one passed in
* as parameter. That is HiFi and either uni-directional or bi-directional.
* as parameter. That is HiFi/Normal and either uni-directional or bi-directional.
*/
static inline bool tb_switch_tmu_hifi_is_enabled(const struct tb_switch *sw,
static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw,
bool unidirectional)
{
return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI &&
return sw->tmu.rate == sw->tmu.rate_request &&
sw->tmu.unidirectional == unidirectional;
}
/* Map a CLx state to a human-readable name for log/debug messages. */
static inline const char *tb_switch_clx_name(enum tb_clx clx)
{
	/* CL0s and CL1 are enabled and supported together */
	if (clx == TB_CL1)
		return "CL0s/CL1";
	return "unknown";
}
int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx);
int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx);
/**
* tb_switch_is_clx_enabled() - Checks if the CLx is enabled
* @sw: Router to check the CLx state for
*
* Checks if the CLx is enabled on the router upstream link.
* Not applicable for a host router.
*/
static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw)
{
return sw->clx != TB_CLX_DISABLE;
}
/**
* tb_switch_is_cl0s_enabled() - Checks if the CL0s is enabled
* @sw: Router to check for the CL0s
* @sw: Router to check for the CLx
* @clx: The CLx state to check for
*
* Checks if the CL0s is enabled on the router upstream link.
* Checks if the specified CLx is enabled on the router upstream link.
* Not applicable for a host router.
*/
static inline bool tb_switch_is_cl0s_enabled(const struct tb_switch *sw)
static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw,
enum tb_clx clx)
{
return sw->clx == TB_CL0S;
return sw->clx == clx;
}
/**
......
......@@ -234,6 +234,7 @@ enum usb4_switch_op {
/* Router TMU configuration */
#define TMU_RTR_CS_0 0x00
#define TMU_RTR_CS_0_FREQ_WIND_MASK GENMASK(26, 16)
#define TMU_RTR_CS_0_TD BIT(27)
#define TMU_RTR_CS_0_UCAP BIT(30)
#define TMU_RTR_CS_1 0x01
......@@ -244,6 +245,11 @@ enum usb4_switch_op {
#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0)
#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16)
#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16
#define TMU_RTR_CS_15 0xf
#define TMU_RTR_CS_15_FREQ_AVG_MASK GENMASK(5, 0)
#define TMU_RTR_CS_15_DELAY_AVG_MASK GENMASK(11, 6)
#define TMU_RTR_CS_15_OFFSET_AVG_MASK GENMASK(17, 12)
#define TMU_RTR_CS_15_ERROR_AVG_MASK GENMASK(23, 18)
#define TMU_RTR_CS_22 0x16
#define TMU_RTR_CS_24 0x18
#define TMU_RTR_CS_25 0x19
......
......@@ -11,6 +11,55 @@
#include "tb.h"
/*
 * Program the TMU frequency measurement window (TMU_RTR_CS_0) and the
 * four averaging constants (TMU_RTR_CS_15) matching the requested rate.
 * Rates other than Normal/HiFi need no parameter update and return 0.
 */
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
					 enum tb_switch_tmu_rate rate)
{
	u32 freq, avg, val;
	int ret;

	if (rate == TB_SWITCH_TMU_RATE_NORMAL) {
		freq = 30;
		avg = 4;
	} else if (rate == TB_SWITCH_TMU_RATE_HIFI) {
		freq = 800;
		avg = 8;
	} else {
		return 0;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	/* Same averaging constant goes into all four fields */
	val &= ~(TMU_RTR_CS_15_FREQ_AVG_MASK | TMU_RTR_CS_15_DELAY_AVG_MASK |
		 TMU_RTR_CS_15_OFFSET_AVG_MASK | TMU_RTR_CS_15_ERROR_AVG_MASK);
	val |= FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
	       FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
	       FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
	       FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->tmu.cap + TMU_RTR_CS_15, 1);
}
static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
{
bool root_switch = !tb_route(sw);
......@@ -348,7 +397,7 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
if (tb_route(sw)) {
bool unidirectional = tb_switch_tmu_hifi_is_enabled(sw, true);
bool unidirectional = sw->tmu.unidirectional;
struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *down, *up;
int ret;
......@@ -359,12 +408,13 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
* In case of uni-directional time sync, TMU handshake is
* initiated by upstream router. In case of bi-directional
* time sync, TMU handshake is initiated by downstream router.
* Therefore, we change the rate to off in the respective
* router.
* We change downstream router's rate to off for both uni/bidir
* cases although it is needed only for the bi-directional mode.
* We avoid changing upstream router's mode since it might
* have another downstream router plugged, that is set to
* uni-directional mode and we don't want to change it's TMU
* mode.
*/
if (unidirectional)
tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
else
tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
tb_port_tmu_time_sync_disable(up);
......@@ -411,6 +461,7 @@ static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
else
tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
tb_port_tmu_unidirectional_disable(down);
tb_port_tmu_unidirectional_disable(up);
}
......@@ -492,7 +543,11 @@ static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
ret = tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_HIFI);
ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
if (ret)
return ret;
ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
if (ret)
return ret;
......@@ -519,7 +574,83 @@ static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
return ret;
}
static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
/*
 * Roll the TMU configuration back to the previous mode after a failed
 * mode change. Errors from the individual steps are deliberately
 * ignored: the caller already reports the original failure.
 */
static void __tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up = tb_upstream_port(sw);
	struct tb_port *down = tb_port_at(tb_route(sw), parent);

	tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional);
	/*
	 * Uni-directional sync is driven by the parent router, so the
	 * previous rate lives there; otherwise it lives in this router.
	 */
	tb_switch_tmu_rate_write(sw->tmu.unidirectional_request ? parent : sw,
				 sw->tmu.rate);
	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional);
}
/*
 * Switch the router's TMU from the currently programmed mode to the
 * requested one (e.g. Normal-Uni -> HiFi-Uni).
 *
 * Every failure path restores the previous TMU configuration via
 * __tb_switch_tmu_change_mode_prev(). Fix: the rate write and mode
 * parameter steps previously returned directly on error, leaving the
 * downstream port's unidirectional setting changed without rollback;
 * they now take the common "goto out" rollback path like the rest.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int __tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);

	ret = tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional_request);
	if (ret)
		goto out;

	/* Uni-directional sync is initiated by the parent router */
	if (sw->tmu.unidirectional_request)
		ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
	else
		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
	if (ret)
		goto out;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_change_mode_prev(sw);
	return ret;
}
/**
* tb_switch_tmu_enable() - Enable TMU on a router
* @sw: Router whose TMU to enable
*
* Enables TMU of a router to be in uni-directional Normal/HiFi
* or bi-directional HiFi mode. Calling tb_switch_tmu_configure() is required
* before calling this function, to select the mode Normal/HiFi and
* directionality (uni-directional/bi-directional).
* In HiFi mode all tunneling should work. In Normal mode, DP tunneling can't
* work. Uni-directional mode is required for CLx (Link Low-Power) to work.
*/
int tb_switch_tmu_enable(struct tb_switch *sw)
{
bool unidirectional = sw->tmu.unidirectional_request;
int ret;
......@@ -535,12 +666,15 @@ static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
if (!tb_switch_is_clx_supported(sw))
return 0;
if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
return 0;
if (tb_switch_is_titan_ridge(sw) && unidirectional) {
/* Titan Ridge supports only CL0s */
if (!tb_switch_is_cl0s_enabled(sw))
/*
* Titan Ridge supports CL0s and CL1 only. CL0s and CL1 are
* enabled and supported together.
*/
if (!tb_switch_is_clx_enabled(sw, TB_CL1))
return -EOPNOTSUPP;
ret = tb_switch_tmu_objection_mask(sw);
......@@ -557,7 +691,11 @@ static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
return ret;
if (tb_route(sw)) {
/* The used mode changes are from OFF to HiFi-Uni/HiFi-BiDir */
/*
* The used mode changes are from OFF to
* HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
* HiFi-Uni.
*/
if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
if (unidirectional)
ret = __tb_switch_tmu_enable_unidirectional(sw);
......@@ -565,6 +703,10 @@ static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
ret = __tb_switch_tmu_enable_bidirectional(sw);
if (ret)
return ret;
} else if (sw->tmu.rate == TB_SWITCH_TMU_RATE_NORMAL) {
ret = __tb_switch_tmu_change_mode(sw);
if (ret)
return ret;
}
sw->tmu.unidirectional = unidirectional;
} else {
......@@ -574,39 +716,21 @@ static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
* of the child node - see above.
* Here only the host router' rate configuration is written.
*/
ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
if (ret)
return ret;
}
sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;
sw->tmu.rate = sw->tmu.rate_request;
tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
return tb_switch_tmu_set_time_disruption(sw, false);
}
/**
* tb_switch_tmu_enable() - Enable TMU on a router
* @sw: Router whose TMU to enable
*
* Enables TMU of a router to be in uni-directional or bi-directional HiFi mode.
* Calling tb_switch_tmu_configure() is required before calling this function,
* to select the mode HiFi and directionality (uni-directional/bi-directional).
* In both modes all tunneling should work. Uni-directional mode is required for
* CLx (Link Low-Power) to work.
*/
int tb_switch_tmu_enable(struct tb_switch *sw)
{
if (sw->tmu.rate_request == TB_SWITCH_TMU_RATE_NORMAL)
return -EOPNOTSUPP;
return tb_switch_tmu_hifi_enable(sw);
}
/**
* tb_switch_tmu_configure() - Configure the TMU rate and directionality
* @sw: Router whose mode to change
* @rate: Rate to configure Off/LowRes/HiFi
* @rate: Rate to configure Off/Normal/HiFi
* @unidirectional: If uni-directional (bi-directional otherwise)
*
* Selects the rate of the TMU and directionality (uni-directional or
......@@ -618,3 +742,32 @@ void tb_switch_tmu_configure(struct tb_switch *sw,
sw->tmu.unidirectional_request = unidirectional;
sw->tmu.rate_request = rate;
}
/*
 * device_for_each_child() callback: if @dev is a router, configure its
 * TMU to the rate passed in @rate (keeping the current CLx-derived
 * directionality) and enable it. Always returns 0 so iteration
 * continues over the remaining children.
 */
static int tb_switch_tmu_config_enable(struct device *dev, void *rate)
{
	struct tb_switch *sw;

	if (!tb_is_switch(dev))
		return 0;

	sw = tb_to_switch(dev);
	tb_switch_tmu_configure(sw, *(enum tb_switch_tmu_rate *)rate,
				tb_switch_is_clx_enabled(sw, TB_CL1));
	if (tb_switch_tmu_enable(sw))
		tb_sw_dbg(sw, "fail switching TMU mode for 1st depth router\n");

	return 0;
}
/**
 * tb_switch_enable_tmu_1st_child() - Configure and enable TMU for 1st children
 * @sw: The router to configure and enable its children TMU
 * @rate: Rate of the TMU to configure the router's children to
 *
 * Configures and enables the TMU mode of 1st depth children of the specified
 * router to the specified rate.
 */
void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
				    enum tb_switch_tmu_rate rate)
{
	device_for_each_child(&sw->dev, &rate,
			      tb_switch_tmu_config_enable);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment