Commit 5367f82a authored by Greg Kroah-Hartman

Merge tag 'thunderbolt-for-v5.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v5.13 merge window

This includes the following Thunderbolt/USB4 changes for the v5.13 merge window:

  * Debugfs improvements

  * Align the inter-domain (peer-to-peer) support with the USB4
    inter-domain spec for better interoperability

  * Add support for USB4 DROM and the new product descriptor

  * More KUnit tests

  * Detailed uevent for routers

  * A few miscellaneous improvements

All these have been in linux-next without reported issues.

* tag 'thunderbolt-for-v5.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (24 commits)
  thunderbolt: Hide authorized attribute if router does not support PCIe tunnels
  thunderbolt: Add details to router uevent
  thunderbolt: Unlock on error path in tb_domain_add()
  thunderbolt: Add support for USB4 DROM
  thunderbolt: Check quirks in tb_switch_add()
  thunderbolt: Add KUnit tests for DMA tunnels
  thunderbolt: Add KUnit tests for XDomain properties
  net: thunderbolt: Align the driver to the USB4 networking spec
  thunderbolt: Allow multiple DMA tunnels over a single XDomain connection
  thunderbolt: Drop unused tb_port_set_initial_credits()
  thunderbolt: Use dedicated flow control for DMA tunnels
  thunderbolt: Add support for maxhopid XDomain property
  thunderbolt: Add tb_property_copy_dir()
  thunderbolt: Align XDomain protocol timeouts with the spec
  thunderbolt: Use pseudo-random number as initial property block generation
  thunderbolt: Do not re-establish XDomain DMA paths automatically
  thunderbolt: Add more logging to XDomain connections
  Documentation / thunderbolt: Drop speed/lanes entries for XDomain
  thunderbolt: Decrease control channel timeout for software connection manager
  thunderbolt: Do not pass timeout for tb_cfg_reset()
  ...
parents 9bc46a12 6f3badea
Documentation/ABI/testing/sysfs-bus-thunderbolt

-What:		/sys/bus/thunderbolt/devices/<xdomain>/rx_speed
-Date:		Feb 2021
-KernelVersion:	5.11
-Contact:	Isaac Hazan <isaac.hazan@intel.com>
-Description:	This attribute reports the XDomain RX speed per lane.
-		All RX lanes run at the same speed.
-What:		/sys/bus/thunderbolt/devices/<xdomain>/rx_lanes
-Date:		Feb 2021
-KernelVersion:	5.11
-Contact:	Isaac Hazan <isaac.hazan@intel.com>
-Description:	This attribute reports the number of RX lanes the XDomain
-		is using simultaneously through its upstream port.
-What:		/sys/bus/thunderbolt/devices/<xdomain>/tx_speed
-Date:		Feb 2021
-KernelVersion:	5.11
-Contact:	Isaac Hazan <isaac.hazan@intel.com>
-Description:	This attribute reports the XDomain TX speed per lane.
-		All TX lanes run at the same speed.
-What:		/sys/bus/thunderbolt/devices/<xdomain>/tx_lanes
-Date:		Feb 2021
-KernelVersion:	5.11
-Contact:	Isaac Hazan <isaac.hazan@intel.com>
-Description:	This attribute reports number of TX lanes the XDomain
-		is using simultaneously through its upstream port.
What:		/sys/bus/thunderbolt/devices/.../domainX/boot_acl
Date:		Jun 2018
KernelVersion:	4.17
@@ -162,6 +134,13 @@ Contact: thunderbolt-software@lists.01.org
Description:	This attribute contains name of this device extracted from
		the device DROM.
+
+What:		/sys/bus/thunderbolt/devices/.../maxhopid
+Date:		Jul 2021
+KernelVersion:	5.13
+Contact:	Mika Westerberg <mika.westerberg@linux.intel.com>
+Description:	Only set for XDomains. The maximum HopID the other host
+		supports as its input HopID.
+
What:		/sys/bus/thunderbolt/devices/.../rx_speed
Date:		Jan 2020
KernelVersion:	5.5
......
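These attributes are ordinary sysfs files, so they can be read with plain file I/O. A minimal userspace sketch, assuming a hypothetical XDomain device named "0-1" under /sys/bus/thunderbolt/devices (real names depend on the topology):

#include <stdio.h>

/* Read the remote host's maximum input HopID for a hypothetical
 * XDomain device "0-1"; the device name is a placeholder.
 */
int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/bus/thunderbolt/devices/0-1/maxhopid", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("maxhopid: %s", buf);
	fclose(f);
	return 0;
}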
drivers/net/thunderbolt.c

@@ -25,13 +25,13 @@
 /* Protocol timeouts in ms */
 #define TBNET_LOGIN_DELAY	4500
 #define TBNET_LOGIN_TIMEOUT	500
-#define TBNET_LOGOUT_TIMEOUT	100
+#define TBNET_LOGOUT_TIMEOUT	1000
 #define TBNET_RING_SIZE		256
-#define TBNET_LOCAL_PATH	0xf
 #define TBNET_LOGIN_RETRIES	60
-#define TBNET_LOGOUT_RETRIES	5
+#define TBNET_LOGOUT_RETRIES	10
 #define TBNET_MATCH_FRAGS_ID	BIT(1)
+#define TBNET_64K_FRAMES	BIT(2)
 #define TBNET_MAX_MTU		SZ_64K
 #define TBNET_FRAME_SIZE	SZ_4K
 #define TBNET_MAX_PAYLOAD_SIZE	\
@@ -154,8 +154,8 @@ struct tbnet_ring {
  * @login_sent: ThunderboltIP login message successfully sent
  * @login_received: ThunderboltIP login message received from the remote
  *		    host
- * @transmit_path: HopID the other end needs to use building the
- *		   opposite side path.
+ * @local_transmit_path: HopID we are using to send out packets
+ * @remote_transmit_path: HopID the other end is using to send packets to us
  * @connection_lock: Lock serializing access to @login_sent,
  *		     @login_received and @transmit_path.
  * @login_retries: Number of login retries currently done
@@ -184,7 +184,8 @@ struct tbnet {
 	atomic_t command_id;
 	bool login_sent;
 	bool login_received;
-	u32 transmit_path;
+	int local_transmit_path;
+	int remote_transmit_path;
 	struct mutex connection_lock;
 	int login_retries;
 	struct delayed_work login_work;
@@ -257,7 +258,7 @@ static int tbnet_login_request(struct tbnet *net, u8 sequence)
 		 atomic_inc_return(&net->command_id));

 	request.proto_version = TBIP_LOGIN_PROTO_VERSION;
-	request.transmit_path = TBNET_LOCAL_PATH;
+	request.transmit_path = net->local_transmit_path;

 	return tb_xdomain_request(xd, &request, sizeof(request),
 				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
@@ -364,10 +365,10 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
 	mutex_lock(&net->connection_lock);

 	if (net->login_sent && net->login_received) {
-		int retries = TBNET_LOGOUT_RETRIES;
+		int ret, retries = TBNET_LOGOUT_RETRIES;

 		while (send_logout && retries-- > 0) {
-			int ret = tbnet_logout_request(net);
+			ret = tbnet_logout_request(net);
 			if (ret != -ETIMEDOUT)
 				break;
 		}
@@ -377,8 +378,16 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
 		tbnet_free_buffers(&net->rx_ring);
 		tbnet_free_buffers(&net->tx_ring);

-		if (tb_xdomain_disable_paths(net->xd))
+		ret = tb_xdomain_disable_paths(net->xd,
+					       net->local_transmit_path,
+					       net->rx_ring.ring->hop,
+					       net->remote_transmit_path,
+					       net->tx_ring.ring->hop);
+		if (ret)
 			netdev_warn(net->dev, "failed to disable DMA paths\n");
+
+		tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
+		net->remote_transmit_path = 0;
 	}

 	net->login_retries = 0;
@@ -424,7 +433,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
 		if (!ret) {
 			mutex_lock(&net->connection_lock);
 			net->login_received = true;
-			net->transmit_path = pkg->transmit_path;
+			net->remote_transmit_path = pkg->transmit_path;

 			/* If we reached the number of max retries or
 			 * previous logout, schedule another round of
@@ -597,12 +606,18 @@ static void tbnet_connected_work(struct work_struct *work)
 	if (!connected)
 		return;

+	ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
+	if (ret != net->remote_transmit_path) {
+		netdev_err(net->dev, "failed to allocate Rx HopID\n");
+		return;
+	}
+
 	/* Both logins successful so enable the high-speed DMA paths and
 	 * start the network device queue.
 	 */
-	ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
+	ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
 				      net->rx_ring.ring->hop,
-				      net->transmit_path,
+				      net->remote_transmit_path,
 				      net->tx_ring.ring->hop);
 	if (ret) {
 		netdev_err(net->dev, "failed to enable DMA paths\n");
@@ -629,6 +644,7 @@ static void tbnet_connected_work(struct work_struct *work)
 err_stop_rings:
 	tb_ring_stop(net->rx_ring.ring);
 	tb_ring_stop(net->tx_ring.ring);
+	tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
 }

 static void tbnet_login_work(struct work_struct *work)
@@ -851,6 +867,7 @@ static int tbnet_open(struct net_device *dev)
 	struct tb_xdomain *xd = net->xd;
 	u16 sof_mask, eof_mask;
 	struct tb_ring *ring;
+	int hopid;

 	netif_carrier_off(dev);
@@ -862,6 +879,15 @@ static int tbnet_open(struct net_device *dev)
 	}
 	net->tx_ring.ring = ring;

+	hopid = tb_xdomain_alloc_out_hopid(xd, -1);
+	if (hopid < 0) {
+		netdev_err(dev, "failed to allocate Tx HopID\n");
+		tb_ring_free(net->tx_ring.ring);
+		net->tx_ring.ring = NULL;
+		return hopid;
+	}
+	net->local_transmit_path = hopid;
+
 	sof_mask = BIT(TBIP_PDF_FRAME_START);
 	eof_mask = BIT(TBIP_PDF_FRAME_END);
@@ -893,6 +919,8 @@ static int tbnet_stop(struct net_device *dev)
 	tb_ring_free(net->rx_ring.ring);
 	net->rx_ring.ring = NULL;

+	tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path);
+
 	tb_ring_free(net->tx_ring.ring);
 	net->tx_ring.ring = NULL;
@@ -1340,7 +1368,7 @@ static int __init tbnet_init(void)
 	 * the moment.
 	 */
 	tb_property_add_immediate(tbnet_dir, "prtcstns",
-				  TBNET_MATCH_FRAGS_ID);
+				  TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES);

 	ret = tb_register_property_dir("network", tbnet_dir);
 	if (ret) {
......
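Taken together, the hunks above change the calling convention for XDomain service drivers: instead of the fixed TBNET_LOCAL_PATH HopID, a driver now reserves its HopIDs from the XDomain before enabling the DMA paths, and releases them on teardown. A condensed sketch of that sequence, modelled on the dma_test changes later in this series (the helper name and the exact ring/HopID pairing are illustrative, not a kernel API):

/*
 * Sketch of the HopID handling a service driver performs after this
 * series. Error handling is trimmed; example_enable() is hypothetical.
 */
static int example_enable(struct tb_xdomain *xd, struct tb_ring *tx_ring,
			  struct tb_ring *rx_ring)
{
	int tx_hopid, rx_hopid, ret;

	/* Pick any free output HopID; -1 lets the core choose */
	tx_hopid = tb_xdomain_alloc_out_hopid(xd, -1);
	if (tx_hopid < 0)
		return tx_hopid;

	/* Pick any free input HopID the same way */
	rx_hopid = tb_xdomain_alloc_in_hopid(xd, -1);
	if (rx_hopid < 0) {
		tb_xdomain_release_out_hopid(xd, tx_hopid);
		return rx_hopid;
	}

	/* Ask the connection manager to set up the DMA tunnel */
	ret = tb_xdomain_enable_paths(xd, tx_hopid, tx_ring->hop,
				      rx_hopid, rx_ring->hop);
	if (ret) {
		tb_xdomain_release_in_hopid(xd, rx_hopid);
		tb_xdomain_release_out_hopid(xd, tx_hopid);
	}
	return ret;
}

Teardown mirrors this: tb_xdomain_disable_paths() is called with the same four path/ring arguments, followed by tb_xdomain_release_in_hopid() and tb_xdomain_release_out_hopid().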
drivers/thunderbolt/ctl.c

@@ -17,7 +17,7 @@
 #define TB_CTL_RX_PKG_COUNT	10
-#define TB_CTL_RETRIES		4
+#define TB_CTL_RETRIES		1

 /**
  * struct tb_ctl - Thunderbolt control channel
@@ -29,6 +29,7 @@
  * @request_queue_lock: Lock protecting @request_queue
  * @request_queue: List of outstanding requests
  * @running: Is the control channel running at the moment
+ * @timeout_msec: Default timeout for non-raw control messages
  * @callback: Callback called when hotplug message is received
  * @callback_data: Data passed to @callback
  */
@@ -43,6 +44,7 @@ struct tb_ctl {
 	struct list_head request_queue;
 	bool running;
+	int timeout_msec;
 	event_cb callback;
 	void *callback_data;
 };
@@ -613,6 +615,7 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
 /**
  * tb_ctl_alloc() - allocate a control channel
  * @nhi: Pointer to NHI
+ * @timeout_msec: Default timeout used with non-raw control messages
  * @cb: Callback called for plug events
  * @cb_data: Data passed to @cb
  *
@@ -620,13 +623,15 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
  *
  * Return: Returns a pointer on success or NULL on failure.
  */
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
+			    void *cb_data)
 {
 	int i;
 	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
 	if (!ctl)
 		return NULL;
 	ctl->nhi = nhi;
+	ctl->timeout_msec = timeout_msec;
 	ctl->callback = cb;
 	ctl->callback_data = cb_data;
@@ -802,14 +807,12 @@ static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
  * tb_cfg_reset() - send a reset packet and wait for a response
  * @ctl: Control channel pointer
  * @route: Router string for the router to send reset
- * @timeout_msec: Timeout in ms how long to wait for the response
  *
  * If the switch at route is incorrectly configured then we will not receive a
  * reply (even though the switch will reset). The caller should check for
  * -ETIMEDOUT and attempt to reconfigure the switch.
  */
-struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
-				  int timeout_msec)
+struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
 {
 	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
 	struct tb_cfg_result res = { 0 };
@@ -831,7 +834,7 @@ struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
 	req->response_size = sizeof(reply);
 	req->response_type = TB_CFG_PKG_RESET;

-	res = tb_cfg_request_sync(ctl, req, timeout_msec);
+	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);

 	tb_cfg_request_put(req);
@@ -1007,7 +1010,7 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
 		enum tb_cfg_space space, u32 offset, u32 length)
 {
 	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
-			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
+			space, offset, length, ctl->timeout_msec);
 	switch (res.err) {
 	case 0:
 		/* Success */
@@ -1033,7 +1036,7 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
 		 enum tb_cfg_space space, u32 offset, u32 length)
 {
 	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
-			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
+			space, offset, length, ctl->timeout_msec);
 	switch (res.err) {
 	case 0:
 		/* Success */
@@ -1071,7 +1074,7 @@ int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
 	u32 dummy;
 	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
 						   TB_CFG_SWITCH, 0, 1,
-						   TB_CFG_DEFAULT_TIMEOUT);
+						   ctl->timeout_msec);
 	if (res.err == 1)
 		return -EIO;
 	if (res.err)
......
drivers/thunderbolt/ctl.h

@@ -21,15 +21,14 @@ struct tb_ctl;
 typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
			 const void *buf, size_t size);

-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data);
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
+			    void *cb_data);
 void tb_ctl_start(struct tb_ctl *ctl);
 void tb_ctl_stop(struct tb_ctl *ctl);
 void tb_ctl_free(struct tb_ctl *ctl);

 /* configuration commands */
-#define TB_CFG_DEFAULT_TIMEOUT 5000 /* msec */

 struct tb_cfg_result {
 	u64 response_route;
 	u32 response_port; /*
@@ -124,8 +123,7 @@ static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
 }

 int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug);
-struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
-				  int timeout_msec);
+struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route);
 struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
				     u64 route, u32 port,
				     enum tb_cfg_space space, u32 offset,
......
drivers/thunderbolt/debugfs.c

@@ -251,6 +251,29 @@ static ssize_t counters_write(struct file *file, const char __user *user_buf,
 	return ret < 0 ? ret : count;
 }

+static void cap_show_by_dw(struct seq_file *s, struct tb_switch *sw,
+			   struct tb_port *port, unsigned int cap,
+			   unsigned int offset, u8 cap_id, u8 vsec_id,
+			   int dwords)
+{
+	int i, ret;
+	u32 data;
+
+	for (i = 0; i < dwords; i++) {
+		if (port)
+			ret = tb_port_read(port, &data, TB_CFG_PORT, cap + offset + i, 1);
+		else
+			ret = tb_sw_read(sw, &data, TB_CFG_SWITCH, cap + offset + i, 1);
+		if (ret) {
+			seq_printf(s, "0x%04x <not accessible>\n", cap + offset + i);
+			continue;
+		}
+
+		seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n", cap + offset + i,
+			   offset + i, cap_id, vsec_id, data);
+	}
+}
+
 static void cap_show(struct seq_file *s, struct tb_switch *sw,
		      struct tb_port *port, unsigned int cap, u8 cap_id,
		      u8 vsec_id, int length)
@@ -267,10 +290,7 @@ static void cap_show(struct seq_file *s, struct tb_switch *sw,
 		else
 			ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords);
 		if (ret) {
-			seq_printf(s, "0x%04x <not accessible>\n",
-				   cap + offset);
-			if (dwords > 1)
-				seq_printf(s, "0x%04x ...\n", cap + offset + 1);
+			cap_show_by_dw(s, sw, port, cap, offset, cap_id, vsec_id, length);
 			return;
 		}
@@ -341,15 +361,6 @@ static void port_cap_show(struct tb_port *port, struct seq_file *s,
 		} else {
 			length = header.extended_short.length;
 			vsec_id = header.extended_short.vsec_id;
-			/*
-			 * Ice Lake and Tiger Lake do not implement the
-			 * full length of the capability, only first 32
-			 * dwords so hard-code it here.
-			 */
-			if (!vsec_id &&
-			    (tb_switch_is_ice_lake(port->sw) ||
-			     tb_switch_is_tiger_lake(port->sw)))
-				length = 32;
 		}
 		break;
......
drivers/thunderbolt/dma_test.c

@@ -13,7 +13,6 @@
 #include <linux/sizes.h>
 #include <linux/thunderbolt.h>

-#define DMA_TEST_HOPID			8
 #define DMA_TEST_TX_RING_SIZE		64
 #define DMA_TEST_RX_RING_SIZE		256
 #define DMA_TEST_FRAME_SIZE		SZ_4K
@@ -72,7 +71,9 @@ static const char * const dma_test_result_names[] = {
  * @svc: XDomain service the driver is bound to
  * @xd: XDomain the service belongs to
  * @rx_ring: Software ring holding RX frames
+ * @rx_hopid: HopID used for receiving frames
  * @tx_ring: Software ring holding TX frames
+ * @tx_hopid: HopID used for sending fames
  * @packets_to_send: Number of packets to send
  * @packets_to_receive: Number of packets to receive
  * @packets_sent: Actual number of packets sent
@@ -92,7 +93,9 @@ struct dma_test {
 	const struct tb_service *svc;
 	struct tb_xdomain *xd;
 	struct tb_ring *rx_ring;
+	int rx_hopid;
 	struct tb_ring *tx_ring;
+	int tx_hopid;
 	unsigned int packets_to_send;
 	unsigned int packets_to_receive;
 	unsigned int packets_sent;
@@ -119,10 +122,12 @@ static void *dma_test_pattern;
 static void dma_test_free_rings(struct dma_test *dt)
 {
 	if (dt->rx_ring) {
+		tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
 		tb_ring_free(dt->rx_ring);
 		dt->rx_ring = NULL;
 	}
 	if (dt->tx_ring) {
+		tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
 		tb_ring_free(dt->tx_ring);
 		dt->tx_ring = NULL;
 	}
@@ -151,6 +156,14 @@ static int dma_test_start_rings(struct dma_test *dt)
 		dt->tx_ring = ring;
 		e2e_tx_hop = ring->hop;
+
+		ret = tb_xdomain_alloc_out_hopid(xd, -1);
+		if (ret < 0) {
+			dma_test_free_rings(dt);
+			return ret;
+		}
+		dt->tx_hopid = ret;
 	}

 	if (dt->packets_to_receive) {
@@ -168,11 +181,19 @@ static int dma_test_start_rings(struct dma_test *dt)
 		}

 		dt->rx_ring = ring;
+
+		ret = tb_xdomain_alloc_in_hopid(xd, -1);
+		if (ret < 0) {
+			dma_test_free_rings(dt);
+			return ret;
+		}
+		dt->rx_hopid = ret;
 	}

-	ret = tb_xdomain_enable_paths(dt->xd, DMA_TEST_HOPID,
+	ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
				      dt->tx_ring ? dt->tx_ring->hop : 0,
-				      DMA_TEST_HOPID,
+				      dt->rx_hopid,
				      dt->rx_ring ? dt->rx_ring->hop : 0);
 	if (ret) {
 		dma_test_free_rings(dt);
@@ -189,12 +210,18 @@ static int dma_test_start_rings(struct dma_test *dt)
 static void dma_test_stop_rings(struct dma_test *dt)
 {
+	int ret;
+
 	if (dt->rx_ring)
 		tb_ring_stop(dt->rx_ring);
 	if (dt->tx_ring)
 		tb_ring_stop(dt->tx_ring);

-	if (tb_xdomain_disable_paths(dt->xd))
+	ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
+				       dt->tx_ring ? dt->tx_ring->hop : 0,
+				       dt->rx_hopid,
+				       dt->rx_ring ? dt->rx_ring->hop : 0);
+	if (ret)
 		dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");

 	dma_test_free_rings(dt);
......
drivers/thunderbolt/domain.c

@@ -341,9 +341,34 @@ struct device_type tb_domain_type = {
 	.release = tb_domain_release,
 };

+static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
+			       const void *buf, size_t size)
+{
+	struct tb *tb = data;
+
+	if (!tb->cm_ops->handle_event) {
+		tb_warn(tb, "domain does not have event handler\n");
+		return true;
+	}
+
+	switch (type) {
+	case TB_CFG_PKG_XDOMAIN_REQ:
+	case TB_CFG_PKG_XDOMAIN_RESP:
+		if (tb_is_xdomain_enabled())
+			return tb_xdomain_handle_request(tb, type, buf, size);
+		break;
+
+	default:
+		tb->cm_ops->handle_event(tb, type, buf, size);
+	}
+
+	return true;
+}
+
 /**
  * tb_domain_alloc() - Allocate a domain
  * @nhi: Pointer to the host controller
+ * @timeout_msec: Control channel timeout for non-raw messages
  * @privsize: Size of the connection manager private data
  *
  * Allocates and initializes a new Thunderbolt domain. Connection
@@ -355,7 +380,7 @@ struct device_type tb_domain_type = {
  *
  * Return: allocated domain structure on %NULL in case of error
  */
-struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
 {
 	struct tb *tb;
@@ -382,6 +407,10 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
 	if (!tb->wq)
 		goto err_remove_ida;

+	tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
+	if (!tb->ctl)
+		goto err_destroy_wq;
+
 	tb->dev.parent = &nhi->pdev->dev;
 	tb->dev.bus = &tb_bus_type;
 	tb->dev.type = &tb_domain_type;
@@ -391,6 +420,8 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)

 	return tb;

+err_destroy_wq:
+	destroy_workqueue(tb->wq);
 err_remove_ida:
 	ida_simple_remove(&tb_domain_ida, tb->index);
 err_free:
@@ -399,30 +430,6 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
 	return NULL;
 }

-static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
-			       const void *buf, size_t size)
-{
-	struct tb *tb = data;
-
-	if (!tb->cm_ops->handle_event) {
-		tb_warn(tb, "domain does not have event handler\n");
-		return true;
-	}
-
-	switch (type) {
-	case TB_CFG_PKG_XDOMAIN_REQ:
-	case TB_CFG_PKG_XDOMAIN_RESP:
-		if (tb_is_xdomain_enabled())
-			return tb_xdomain_handle_request(tb, type, buf, size);
-		break;
-
-	default:
-		tb->cm_ops->handle_event(tb, type, buf, size);
-	}
-
-	return true;
-}
-
 /**
  * tb_domain_add() - Add domain to the system
  * @tb: Domain to add
@@ -442,13 +449,6 @@ int tb_domain_add(struct tb *tb)
 		return -EINVAL;

 	mutex_lock(&tb->lock);
-
-	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
-	if (!tb->ctl) {
-		ret = -ENOMEM;
-		goto err_unlock;
-	}
-
 	/*
 	 * tb_schedule_hotplug_handler may be called as soon as the config
 	 * channel is started. Thats why we have to hold the lock here.
@@ -493,7 +493,6 @@ int tb_domain_add(struct tb *tb)
 	device_del(&tb->dev);
 err_ctl_stop:
 	tb_ctl_stop(tb->ctl);
-err_unlock:
 	mutex_unlock(&tb->lock);

 	return ret;
@@ -793,6 +792,10 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb)
  * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
  * @tb: Domain enabling the DMA paths
  * @xd: XDomain DMA paths are created to
+ * @transmit_path: HopID we are using to send out packets
+ * @transmit_ring: DMA ring used to send out packets
+ * @receive_path: HopID the other end is using to send packets to us
+ * @receive_ring: DMA ring used to receive packets from @receive_path
  *
  * Calls connection manager specific method to enable DMA paths to the
  * XDomain in question.
@@ -801,18 +804,25 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb)
  * particular returns %-ENOTSUPP if the connection manager
  * implementation does not support XDomains.
  */
-int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+				    int transmit_path, int transmit_ring,
+				    int receive_path, int receive_ring)
 {
 	if (!tb->cm_ops->approve_xdomain_paths)
 		return -ENOTSUPP;

-	return tb->cm_ops->approve_xdomain_paths(tb, xd);
+	return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
+			transmit_ring, receive_path, receive_ring);
 }

 /**
  * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
  * @tb: Domain disabling the DMA paths
  * @xd: XDomain whose DMA paths are disconnected
+ * @transmit_path: HopID we are using to send out packets
+ * @transmit_ring: DMA ring used to send out packets
+ * @receive_path: HopID the other end is using to send packets to us
+ * @receive_ring: DMA ring used to receive packets from @receive_path
  *
  * Calls connection manager specific method to disconnect DMA paths to
  * the XDomain in question.
@@ -821,12 +831,15 @@ int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
  * particular returns %-ENOTSUPP if the connection manager
  * implementation does not support XDomains.
  */
-int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+				       int transmit_path, int transmit_ring,
+				       int receive_path, int receive_ring)
 {
 	if (!tb->cm_ops->disconnect_xdomain_paths)
 		return -ENOTSUPP;

-	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
+	return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
+			transmit_ring, receive_path, receive_ring);
 }

 static int disconnect_xdomain(struct device *dev, void *data)
@@ -837,7 +850,7 @@ static int disconnect_xdomain(struct device *dev, void *data)
 	xd = tb_to_xdomain(dev);
 	if (xd && xd->tb == tb)
-		ret = tb_xdomain_disable_paths(xd);
+		ret = tb_xdomain_disable_all_paths(xd);

 	return ret;
 }
......
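With the widened tb_domain_approve_xdomain_paths()/tb_domain_disconnect_xdomain_paths() above, connection manager backends now receive the HopIDs and ring numbers explicitly instead of reading them from struct tb_xdomain. A bare-bones sketch of a hook with the new signature, illustrative only (the real implementations are in icm.c and tb.c further down):

static int example_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					 int transmit_path, int transmit_ring,
					 int receive_path, int receive_ring)
{
	/*
	 * transmit_path is the HopID we use to send, receive_path the HopID
	 * the other end sends on; transmit_ring/receive_ring are the host
	 * interface DMA rings. A real hook builds a DMA tunnel from these.
	 */
	dev_dbg(&xd->dev, "paths %d/%d, rings %d/%d\n",
		transmit_path, receive_path, transmit_ring, receive_ring);
	return 0;
}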
drivers/thunderbolt/eeprom.c

@@ -277,6 +277,16 @@ struct tb_drom_entry_port {
 	u8 unknown4:2;
 } __packed;

+/* USB4 product descriptor */
+struct tb_drom_entry_desc {
+	struct tb_drom_entry_header header;
+	u16 bcdUSBSpec;
+	u16 idVendor;
+	u16 idProduct;
+	u16 bcdProductFWRevision;
+	u32 TID;
+	u8 productHWRevision;
+};

 /**
  * tb_drom_read_uid_only() - Read UID directly from DROM
@@ -329,6 +339,16 @@ static int tb_drom_parse_entry_generic(struct tb_switch *sw,
 		if (!sw->device_name)
 			return -ENOMEM;
 		break;
+	case 9: {
+		const struct tb_drom_entry_desc *desc =
+			(const struct tb_drom_entry_desc *)entry;
+
+		if (!sw->vendor && !sw->device) {
+			sw->vendor = desc->idVendor;
+			sw->device = desc->idProduct;
+		}
+		break;
+	}
 	}

 	return 0;
@@ -521,6 +541,51 @@ static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
 	return tb_eeprom_read_n(sw, offset, val, count);
 }

+static int tb_drom_parse(struct tb_switch *sw)
+{
+	const struct tb_drom_header *header =
+		(const struct tb_drom_header *)sw->drom;
+	u32 crc;
+
+	crc = tb_crc8((u8 *) &header->uid, 8);
+	if (crc != header->uid_crc8) {
+		tb_sw_warn(sw,
+			   "DROM UID CRC8 mismatch (expected: %#x, got: %#x), aborting\n",
+			   header->uid_crc8, crc);
+		return -EINVAL;
+	}
+	if (!sw->uid)
+		sw->uid = header->uid;
+	sw->vendor = header->vendor_id;
+	sw->device = header->model_id;
+
+	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
+	if (crc != header->data_crc32) {
+		tb_sw_warn(sw,
+			   "DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n",
+			   header->data_crc32, crc);
+	}
+
+	return tb_drom_parse_entries(sw);
+}
+
+static int usb4_drom_parse(struct tb_switch *sw)
+{
+	const struct tb_drom_header *header =
+		(const struct tb_drom_header *)sw->drom;
+	u32 crc;
+
+	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
+	if (crc != header->data_crc32) {
+		tb_sw_warn(sw,
+			   "DROM data CRC32 mismatch (expected: %#x, got: %#x), aborting\n",
+			   header->data_crc32, crc);
+		return -EINVAL;
+	}
+
+	return tb_drom_parse_entries(sw);
+}
+
 /**
  * tb_drom_read() - Copy DROM to sw->drom and parse it
  * @sw: Router whose DROM to read and parse
@@ -534,7 +599,6 @@ static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
 int tb_drom_read(struct tb_switch *sw)
 {
 	u16 size;
-	u32 crc;
 	struct tb_drom_header *header;
 	int res, retries = 1;
@@ -599,31 +663,21 @@ int tb_drom_read(struct tb_switch *sw)
 		goto err;
 	}

-	crc = tb_crc8((u8 *) &header->uid, 8);
-	if (crc != header->uid_crc8) {
-		tb_sw_warn(sw,
-			"drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
-			header->uid_crc8, crc);
-		goto err;
-	}
-	if (!sw->uid)
-		sw->uid = header->uid;
-	sw->vendor = header->vendor_id;
-	sw->device = header->model_id;
-	tb_check_quirks(sw);
-
-	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
-	if (crc != header->data_crc32) {
-		tb_sw_warn(sw,
-			"drom data crc32 mismatch (expected: %#x, got: %#x), continuing\n",
-			header->data_crc32, crc);
-	}
+	tb_sw_dbg(sw, "DROM version: %d\n", header->device_rom_revision);

-	if (header->device_rom_revision > 2)
-		tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
-			   header->device_rom_revision);
+	switch (header->device_rom_revision) {
+	case 3:
+		res = usb4_drom_parse(sw);
+		break;
+	default:
+		tb_sw_warn(sw, "DROM device_rom_revision %#x unknown\n",
+			   header->device_rom_revision);
+		fallthrough;
+	case 1:
+		res = tb_drom_parse(sw);
+		break;
+	}

-	res = tb_drom_parse_entries(sw);
 	/* If the DROM parsing fails, wait a moment and retry once */
 	if (res == -EILSEQ && retries--) {
 		tb_sw_warn(sw, "parsing DROM failed, retrying\n");
@@ -633,10 +687,11 @@ int tb_drom_read(struct tb_switch *sw)
 		goto parse;
 	}

-	return res;
+	if (!res)
+		return 0;

 err:
 	kfree(sw->drom);
 	sw->drom = NULL;
 	return -EIO;
 }
drivers/thunderbolt/icm.c

@@ -557,7 +557,9 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
 	return 0;
 }

-static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+					int transmit_path, int transmit_ring,
+					int receive_path, int receive_ring)
 {
 	struct icm_fr_pkg_approve_xdomain_response reply;
 	struct icm_fr_pkg_approve_xdomain request;
@@ -568,10 +570,10 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
 	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

-	request.transmit_path = xd->transmit_path;
-	request.transmit_ring = xd->transmit_ring;
-	request.receive_path = xd->receive_path;
-	request.receive_ring = xd->receive_ring;
+	request.transmit_path = transmit_path;
+	request.transmit_ring = transmit_ring;
+	request.receive_path = receive_path;
+	request.receive_ring = receive_ring;

 	memset(&reply, 0, sizeof(reply));
 	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
@@ -585,7 +587,9 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 	return 0;
 }

-static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+					   int transmit_path, int transmit_ring,
+					   int receive_path, int receive_ring)
 {
 	u8 phy_port;
 	u8 cmd;
@@ -1122,7 +1126,9 @@ static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
 	return 0;
 }

-static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+					int transmit_path, int transmit_ring,
+					int receive_path, int receive_ring)
 {
 	struct icm_tr_pkg_approve_xdomain_response reply;
 	struct icm_tr_pkg_approve_xdomain request;
@@ -1132,10 +1138,10 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 	request.hdr.code = ICM_APPROVE_XDOMAIN;
 	request.route_hi = upper_32_bits(xd->route);
 	request.route_lo = lower_32_bits(xd->route);
-	request.transmit_path = xd->transmit_path;
-	request.transmit_ring = xd->transmit_ring;
-	request.receive_path = xd->receive_path;
-	request.receive_ring = xd->receive_ring;
+	request.transmit_path = transmit_path;
+	request.transmit_ring = transmit_ring;
+	request.receive_path = receive_path;
+	request.receive_ring = receive_ring;
 	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

 	memset(&reply, 0, sizeof(reply));
@@ -1176,7 +1182,9 @@ static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
 	return 0;
 }

-static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+					   int transmit_path, int transmit_ring,
+					   int receive_path, int receive_ring)
 {
 	int ret;
@@ -2416,7 +2424,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
 	struct icm *icm;
 	struct tb *tb;

-	tb = tb_domain_alloc(nhi, sizeof(struct icm));
+	tb = tb_domain_alloc(nhi, ICM_TIMEOUT, sizeof(struct icm));
 	if (!tb)
 		return NULL;
......
drivers/thunderbolt/property.c

@@ -501,6 +501,77 @@ ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
 	return ret < 0 ? ret : 0;
 }

+/**
+ * tb_property_copy_dir() - Take a deep copy of directory
+ * @dir: Directory to copy
+ *
+ * This function takes a deep copy of @dir and returns back the copy. In
+ * case of error returns %NULL. The resulting directory needs to be
+ * released by calling tb_property_free_dir().
+ */
+struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir)
+{
+	struct tb_property *property, *p = NULL;
+	struct tb_property_dir *d;
+
+	if (!dir)
+		return NULL;
+
+	d = tb_property_create_dir(dir->uuid);
+	if (!d)
+		return NULL;
+
+	list_for_each_entry(property, &dir->properties, list) {
+		struct tb_property *p;
+
+		p = tb_property_alloc(property->key, property->type);
+		if (!p)
+			goto err_free;
+
+		p->length = property->length;
+
+		switch (property->type) {
+		case TB_PROPERTY_TYPE_DIRECTORY:
+			p->value.dir = tb_property_copy_dir(property->value.dir);
+			if (!p->value.dir)
+				goto err_free;
+			break;
+
+		case TB_PROPERTY_TYPE_DATA:
+			p->value.data = kmemdup(property->value.data,
+						property->length * 4,
+						GFP_KERNEL);
+			if (!p->value.data)
+				goto err_free;
+			break;
+
+		case TB_PROPERTY_TYPE_TEXT:
+			p->value.text = kzalloc(p->length * 4, GFP_KERNEL);
+			if (!p->value.text)
+				goto err_free;
+			strcpy(p->value.text, property->value.text);
+			break;
+
+		case TB_PROPERTY_TYPE_VALUE:
+			p->value.immediate = property->value.immediate;
+			break;
+
+		default:
+			break;
+		}
+
+		list_add_tail(&p->list, &d->properties);
+	}
+
+	return d;
+
+err_free:
+	kfree(p);
+	tb_property_free_dir(d);
+
+	return NULL;
+}
+
 /**
  * tb_property_add_immediate() - Add immediate property to directory
  * @parent: Directory to add the property
......
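The new tb_property_copy_dir() pairs with the existing tb_property_free_dir(). A minimal usage sketch; example_copy() and remote_dir are placeholders for whatever directory a caller already holds:

static int example_copy(const struct tb_property_dir *remote_dir)
{
	struct tb_property_dir *copy;

	copy = tb_property_copy_dir(remote_dir);
	if (!copy)
		return -ENOMEM;

	/* ... use the copy independently of the original ... */

	tb_property_free_dir(copy);
	return 0;
}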
drivers/thunderbolt/switch.c

@@ -626,28 +626,6 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
 			    TB_CFG_PORT, ADP_CS_4, 1);
 }

-/**
- * tb_port_set_initial_credits() - Set initial port link credits allocated
- * @port: Port to set the initial credits
- * @credits: Number of credits to to allocate
- *
- * Set initial credits value to be used for ingress shared buffering.
- */
-int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
-{
-	u32 data;
-	int ret;
-
-	ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
-	if (ret)
-		return ret;
-
-	data &= ~ADP_CS_5_LCA_MASK;
-	data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;
-
-	return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
-}
-
 /**
  * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
  * @port: Port whose counters to clear
@@ -1331,7 +1309,7 @@ int tb_switch_reset(struct tb_switch *sw)
 			      TB_CFG_SWITCH, 2, 2);
 	if (res.err)
 		return res.err;
-	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
+	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
 	if (res.err > 0)
 		return -EIO;
 	return res.err;
@@ -1762,6 +1740,18 @@ static struct attribute *switch_attrs[] = {
 	NULL,
 };

+static bool has_port(const struct tb_switch *sw, enum tb_port_type type)
+{
+	const struct tb_port *port;
+
+	tb_switch_for_each_port(sw, port) {
+		if (!port->disabled && port->config.type == type)
+			return true;
+	}
+
+	return false;
+}
+
 static umode_t switch_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
 {
@@ -1770,7 +1760,8 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
 	if (attr == &dev_attr_authorized.attr) {
 		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
-		    sw->tb->security_level == TB_SECURITY_DPONLY)
+		    sw->tb->security_level == TB_SECURITY_DPONLY ||
+		    !has_port(sw, TB_TYPE_PCIE_UP))
 			return 0;
 	} else if (attr == &dev_attr_device.attr) {
 		if (!sw->device)
@@ -1849,6 +1840,39 @@ static void tb_switch_release(struct device *dev)
 	kfree(sw);
 }

+static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	const char *type;
+
+	if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
+		if (add_uevent_var(env, "USB4_VERSION=1.0"))
+			return -ENOMEM;
+	}
+
+	if (!tb_route(sw)) {
+		type = "host";
+	} else {
+		const struct tb_port *port;
+		bool hub = false;
+
+		/* Device is hub if it has any downstream ports */
+		tb_switch_for_each_port(sw, port) {
+			if (!port->disabled && !tb_is_upstream_port(port) &&
+			     tb_port_is_null(port)) {
+				hub = true;
+				break;
+			}
+		}
+
+		type = hub ? "hub" : "device";
+	}
+
+	if (add_uevent_var(env, "USB4_TYPE=%s", type))
+		return -ENOMEM;
+
+	return 0;
+}
+
 /*
  * Currently only need to provide the callbacks. Everything else is handled
  * in the connection manager.
@@ -1882,6 +1906,7 @@ static const struct dev_pm_ops tb_switch_pm_ops = {
 struct device_type tb_switch_type = {
 	.name = "thunderbolt_device",
 	.release = tb_switch_release,
+	.uevent = tb_switch_uevent,
 	.pm = &tb_switch_pm_ops,
 };
@@ -2542,6 +2567,8 @@ int tb_switch_add(struct tb_switch *sw)
 	}
 	tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

+	tb_check_quirks(sw);
+
 	ret = tb_switch_set_uuid(sw);
 	if (ret) {
 		dev_err(&sw->dev, "failed to set UUID\n");
......
drivers/thunderbolt/tb.c

@@ -15,6 +15,8 @@
 #include "tb_regs.h"
 #include "tunnel.h"

+#define TB_TIMEOUT	100 /* ms */
+
 /**
  * struct tb_cm - Simple Thunderbolt connection manager
  * @tunnel_list: List of active tunnels
@@ -1077,7 +1079,9 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
 	return 0;
 }

-static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+				    int transmit_path, int transmit_ring,
+				    int receive_path, int receive_ring)
 {
 	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_port *nhi_port, *dst_port;
@@ -1089,9 +1093,8 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

 	mutex_lock(&tb->lock);
-	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
-				     xd->transmit_path, xd->receive_ring,
-				     xd->receive_path);
+	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
+				     transmit_ring, receive_path, receive_ring);
 	if (!tunnel) {
 		mutex_unlock(&tb->lock);
 		return -ENOMEM;
@@ -1110,29 +1113,40 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 	return 0;
 }

-static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+					  int transmit_path, int transmit_ring,
+					  int receive_path, int receive_ring)
 {
-	struct tb_port *dst_port;
-	struct tb_tunnel *tunnel;
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_port *nhi_port, *dst_port;
+	struct tb_tunnel *tunnel, *n;
 	struct tb_switch *sw;

 	sw = tb_to_switch(xd->dev.parent);
 	dst_port = tb_port_at(xd->route, sw);
+	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

-	/*
-	 * It is possible that the tunnel was already teared down (in
-	 * case of cable disconnect) so it is fine if we cannot find it
-	 * here anymore.
-	 */
-	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
-	tb_deactivate_and_free_tunnel(tunnel);
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
+		if (!tb_tunnel_is_dma(tunnel))
+			continue;
+		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
+			continue;
+
+		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
+					receive_path, receive_ring))
+			tb_deactivate_and_free_tunnel(tunnel);
+	}
 }

-static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+				       int transmit_path, int transmit_ring,
+				       int receive_path, int receive_ring)
 {
 	if (!xd->is_unplugged) {
 		mutex_lock(&tb->lock);
-		__tb_disconnect_xdomain_paths(tb, xd);
+		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
+					      transmit_ring, receive_path,
+					      receive_ring);
 		mutex_unlock(&tb->lock);
 	}
 	return 0;
@@ -1208,12 +1222,12 @@ static void tb_handle_hotplug(struct work_struct *work)
 			 * tb_xdomain_remove() so setting XDomain as
 			 * unplugged here prevents deadlock if they call
 			 * tb_xdomain_disable_paths(). We will tear down
-			 * the path below.
+			 * all the tunnels below.
 			 */
 			xd->is_unplugged = true;
 			tb_xdomain_remove(xd);
 			port->xdomain = NULL;
-			__tb_disconnect_xdomain_paths(tb, xd);
+			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
 			tb_xdomain_put(xd);
 			tb_port_unconfigure_xdomain(port);
 		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
@@ -1562,7 +1576,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
 	struct tb_cm *tcm;
 	struct tb *tb;

-	tb = tb_domain_alloc(nhi, sizeof(*tcm));
+	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
 	if (!tb)
 		return NULL;
......
drivers/thunderbolt/tb.h

@@ -406,8 +406,12 @@ struct tb_cm_ops {
 	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
				    const u8 *challenge, u8 *response);
 	int (*disconnect_pcie_paths)(struct tb *tb);
-	int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
-	int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
+	int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
+				     int transmit_path, int transmit_ring,
+				     int receive_path, int receive_ring);
+	int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
+					int transmit_path, int transmit_ring,
+					int receive_path, int receive_ring);
 	int (*usb4_switch_op)(struct tb_switch *sw, u16 opcode, u32 *metadata,
			      u8 *status, const void *tx_data, size_t tx_data_len,
			      void *rx_data, size_t rx_data_len);
@@ -625,7 +629,7 @@ void tb_domain_exit(void);
 int tb_xdomain_init(void);
 void tb_xdomain_exit(void);

-struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
 int tb_domain_add(struct tb *tb);
 void tb_domain_remove(struct tb *tb);
 int tb_domain_suspend_noirq(struct tb *tb);
@@ -641,8 +645,12 @@ int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
 int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
 int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
 int tb_domain_disconnect_pcie_paths(struct tb *tb);
-int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
-int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+				    int transmit_path, int transmit_ring,
+				    int receive_path, int receive_ring);
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+				       int transmit_path, int transmit_ring,
+				       int receive_path, int receive_ring);
 int tb_domain_disconnect_all_paths(struct tb *tb);

 static inline struct tb *tb_domain_get(struct tb *tb)
@@ -787,32 +795,6 @@ static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
 	return false;
 }

-static inline bool tb_switch_is_ice_lake(const struct tb_switch *sw)
-{
-	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
-		switch (sw->config.device_id) {
-		case PCI_DEVICE_ID_INTEL_ICL_NHI0:
-		case PCI_DEVICE_ID_INTEL_ICL_NHI1:
-			return true;
-		}
-	}
-	return false;
-}
-
-static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
-{
-	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
-		switch (sw->config.device_id) {
-		case PCI_DEVICE_ID_INTEL_TGL_NHI0:
-		case PCI_DEVICE_ID_INTEL_TGL_NHI1:
-		case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
-		case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
-			return true;
-		}
-	}
-	return false;
-}
-
 /**
  * tb_switch_is_usb4() - Is the switch USB4 compliant
  * @sw: Switch to check
@@ -860,7 +842,6 @@ static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)

 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
 int tb_port_add_nfc_credits(struct tb_port *port, int credits);
-int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
 int tb_port_clear_counter(struct tb_port *port, int counter);
 int tb_port_unlock(struct tb_port *port);
 int tb_port_enable(struct tb_port *port);
......
...@@ -119,6 +119,7 @@ static struct tb_switch *alloc_host(struct kunit *test) ...@@ -119,6 +119,7 @@ static struct tb_switch *alloc_host(struct kunit *test)
sw->ports[7].config.type = TB_TYPE_NHI; sw->ports[7].config.type = TB_TYPE_NHI;
sw->ports[7].config.max_in_hop_id = 11; sw->ports[7].config.max_in_hop_id = 11;
sw->ports[7].config.max_out_hop_id = 11; sw->ports[7].config.max_out_hop_id = 11;
sw->ports[7].config.nfc_credits = 0x41800000;
sw->ports[8].config.type = TB_TYPE_PCIE_DOWN; sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
sw->ports[8].config.max_in_hop_id = 8; sw->ports[8].config.max_in_hop_id = 8;
...@@ -1594,6 +1595,489 @@ static void tb_test_tunnel_port_on_path(struct kunit *test) ...@@ -1594,6 +1595,489 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
tb_tunnel_free(dp_tunnel); tb_tunnel_free(dp_tunnel);
} }
static void tb_test_tunnel_dma(struct kunit *test)
{
struct tb_port *nhi, *port;
struct tb_tunnel *tunnel;
struct tb_switch *host;
/*
* Create DMA tunnel from NHI to port 1 and back.
*
* [Host 1]
* 1 ^ In HopID 1 -> Out HopID 8
* |
* v In HopID 8 -> Out HopID 1
* ............ Domain border
* |
* [Host 2]
*/
host = alloc_host(test);
nhi = &host->ports[7];
port = &host->ports[1];
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
/* RX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
/* TX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dma_rx(struct kunit *test)
{
struct tb_port *nhi, *port;
struct tb_tunnel *tunnel;
struct tb_switch *host;
/*
* Create DMA RX tunnel from port 1 to NHI.
*
* [Host 1]
* 1 ^
* |
* | In HopID 15 -> Out HopID 2
* ............ Domain border
* |
* [Host 2]
*/
host = alloc_host(test);
nhi = &host->ports[7];
port = &host->ports[1];
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)1);
/* RX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dma_tx(struct kunit *test)
{
struct tb_port *nhi, *port;
struct tb_tunnel *tunnel;
struct tb_switch *host;
/*
* Create DMA TX tunnel from NHI to port 1.
*
* [Host 1]
* 1 | In HopID 2 -> Out HopID 15
* |
* v
* ............ Domain border
* |
* [Host 2]
*/
host = alloc_host(test);
nhi = &host->ports[7];
port = &host->ports[1];
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)1);
/* TX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dma_chain(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2;
struct tb_port *nhi, *port;
struct tb_tunnel *tunnel;
/*
* Create DMA tunnel from NHI to Device #2 port 3 and back.
*
* [Host 1]
* 1 ^ In HopID 1 -> Out HopID x
* |
* 1 | In HopID x -> Out HopID 1
* [Device #1]
* 7 \
* 1 \
* [Device #2]
* 3 | In HopID x -> Out HopID 8
* |
* v In HopID 8 -> Out HopID x
* ............ Domain border
* |
* [Host 2]
*/
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, true);
dev2 = alloc_dev_default(test, dev1, 0x701, true);
nhi = &host->ports[7];
port = &dev2->ports[3];
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
/* RX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
&dev2->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
&dev1->ports[7]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
&dev1->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
&host->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
/* TX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
&dev1->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
&dev1->ports[7]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
&dev2->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dma_match(struct kunit *test)
{
struct tb_port *nhi, *port;
struct tb_tunnel *tunnel;
struct tb_switch *host;
host = alloc_host(test);
nhi = &host->ports[7];
port = &host->ports[1];
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
tb_tunnel_free(tunnel);
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
tb_tunnel_free(tunnel);
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
tb_tunnel_free(tunnel);
}
static const u32 root_directory[] = {
0x55584401, /* "UXD" v1 */
0x00000018, /* Root directory length */
0x76656e64, /* "vend" */
0x6f726964, /* "orid" */
0x76000001, /* "v" R 1 */
0x00000a27, /* Immediate value, ! Vendor ID */
0x76656e64, /* "vend" */
0x6f726964, /* "orid" */
0x74000003, /* "t" R 3 */
0x0000001a, /* Text leaf offset, (“Apple Inc.”) */
0x64657669, /* "devi" */
0x63656964, /* "ceid" */
0x76000001, /* "v" R 1 */
0x0000000a, /* Immediate value, ! Device ID */
0x64657669, /* "devi" */
0x63656964, /* "ceid" */
0x74000003, /* "t" R 3 */
0x0000001d, /* Text leaf offset, (“Macintosh”) */
0x64657669, /* "devi" */
0x63657276, /* "cerv" */
0x76000001, /* "v" R 1 */
0x80000100, /* Immediate value, Device Revision */
0x6e657477, /* "netw" */
0x6f726b00, /* "ork" */
0x44000014, /* "D" R 20 */
0x00000021, /* Directory data offset, (Network Directory) */
0x4170706c, /* "Appl" */
0x6520496e, /* "e In" */
0x632e0000, /* "c." ! */
0x4d616369, /* "Maci" */
0x6e746f73, /* "ntos" */
0x68000000, /* "h" */
0x00000000, /* padding */
0xca8961c6, /* Directory UUID, Network Directory */
0x9541ce1c, /* Directory UUID, Network Directory */
0x5949b8bd, /* Directory UUID, Network Directory */
0x4f5a5f2e, /* Directory UUID, Network Directory */
0x70727463, /* "prtc" */
0x69640000, /* "id" */
0x76000001, /* "v" R 1 */
0x00000001, /* Immediate value, Network Protocol ID */
0x70727463, /* "prtc" */
0x76657273, /* "vers" */
0x76000001, /* "v" R 1 */
0x00000001, /* Immediate value, Network Protocol Version */
0x70727463, /* "prtc" */
0x72657673, /* "revs" */
0x76000001, /* "v" R 1 */
0x00000001, /* Immediate value, Network Protocol Revision */
0x70727463, /* "prtc" */
0x73746e73, /* "stns" */
0x76000001, /* "v" R 1 */
0x00000000, /* Immediate value, Network Protocol Settings */
};
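For orientation, the numbers above can be decoded against the property block layout these tests exercise: the two-dword header 0x55584401/0x00000018 declares a "UXD" version 1 block whose root directory is 0x18 = 24 dwords, i.e. six entries of four dwords each (two dwords of key, one dword holding the type character and length, one dword of immediate value or offset). The first entry, for example, carries the key "vendorid" with type 'v' (immediate value) and value 0xa27, while the "network" entry has type 'D' (directory), length 20 and offset 0x21, which is where the four-dword directory UUID and the four "prtc*" entries begin.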
static const uuid_t network_dir_uuid =
UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
static void tb_test_property_parse(struct kunit *test)
{
struct tb_property_dir *dir, *network_dir;
struct tb_property *p;
dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
KUNIT_ASSERT_TRUE(test, dir != NULL);
p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
KUNIT_ASSERT_TRUE(test, !p);
p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
KUNIT_ASSERT_TRUE(test, p != NULL);
KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");
p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_TRUE(test, p != NULL);
KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0xa27);
p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
KUNIT_ASSERT_TRUE(test, p != NULL);
KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");
p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_TRUE(test, p != NULL);
KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0xa);
p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
KUNIT_ASSERT_TRUE(test, !p);
p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
KUNIT_ASSERT_TRUE(test, p != NULL);
network_dir = p->value.dir;
KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));
p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_TRUE(test, p != NULL);
KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x1);
p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_TRUE(test, p != NULL);
KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x1);
p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_TRUE(test, p != NULL);
KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x1);
p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_TRUE(test, p != NULL);
KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x0);
p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
KUNIT_EXPECT_TRUE(test, !p);
p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
KUNIT_EXPECT_TRUE(test, !p);
tb_property_free_dir(dir);
}
static void tb_test_property_format(struct kunit *test)
{
struct tb_property_dir *dir;
ssize_t block_len;
u32 *block;
int ret, i;
dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
KUNIT_ASSERT_TRUE(test, dir != NULL);
ret = tb_property_format_dir(dir, NULL, 0);
KUNIT_ASSERT_EQ(test, ret, (int)ARRAY_SIZE(root_directory));
block_len = ret;
block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, block != NULL);
ret = tb_property_format_dir(dir, block, block_len);
KUNIT_EXPECT_EQ(test, ret, 0);
for (i = 0; i < ARRAY_SIZE(root_directory); i++)
KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
tb_property_free_dir(dir);
}
static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
struct tb_property_dir *d2)
{
struct tb_property *p1, *p2, *tmp;
int n1, n2, i;
if (d1->uuid) {
KUNIT_ASSERT_TRUE(test, d2->uuid != NULL);
KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
} else {
KUNIT_ASSERT_TRUE(test, d2->uuid == NULL);
}
n1 = 0;
tb_property_for_each(d1, tmp)
n1++;
KUNIT_ASSERT_NE(test, n1, 0);
n2 = 0;
tb_property_for_each(d2, tmp)
n2++;
KUNIT_ASSERT_NE(test, n2, 0);
KUNIT_ASSERT_EQ(test, n1, n2);
p1 = NULL;
p2 = NULL;
for (i = 0; i < n1; i++) {
p1 = tb_property_get_next(d1, p1);
KUNIT_ASSERT_TRUE(test, p1 != NULL);
p2 = tb_property_get_next(d2, p2);
KUNIT_ASSERT_TRUE(test, p2 != NULL);
KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
KUNIT_ASSERT_EQ(test, p1->type, p2->type);
KUNIT_ASSERT_EQ(test, p1->length, p2->length);
switch (p1->type) {
case TB_PROPERTY_TYPE_DIRECTORY:
KUNIT_ASSERT_TRUE(test, p1->value.dir != NULL);
KUNIT_ASSERT_TRUE(test, p2->value.dir != NULL);
compare_dirs(test, p1->value.dir, p2->value.dir);
break;
case TB_PROPERTY_TYPE_DATA:
KUNIT_ASSERT_TRUE(test, p1->value.data != NULL);
KUNIT_ASSERT_TRUE(test, p2->value.data != NULL);
KUNIT_ASSERT_TRUE(test,
!memcmp(p1->value.data, p2->value.data,
p1->length * 4)
);
break;
case TB_PROPERTY_TYPE_TEXT:
KUNIT_ASSERT_TRUE(test, p1->value.text != NULL);
KUNIT_ASSERT_TRUE(test, p2->value.text != NULL);
KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
break;
case TB_PROPERTY_TYPE_VALUE:
KUNIT_ASSERT_EQ(test, p1->value.immediate,
p2->value.immediate);
break;
default:
KUNIT_FAIL(test, "unexpected property type");
break;
}
}
}
static void tb_test_property_copy(struct kunit *test)
{
struct tb_property_dir *src, *dst;
u32 *block;
int ret, i;
src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
KUNIT_ASSERT_TRUE(test, src != NULL);
dst = tb_property_copy_dir(src);
KUNIT_ASSERT_TRUE(test, dst != NULL);
/* Compare the structures */
compare_dirs(test, src, dst);
/* Compare the resulting property block */
ret = tb_property_format_dir(dst, NULL, 0);
KUNIT_ASSERT_EQ(test, ret, (int)ARRAY_SIZE(root_directory));
block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, block != NULL);
ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
KUNIT_EXPECT_TRUE(test, !ret);
for (i = 0; i < ARRAY_SIZE(root_directory); i++)
KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
tb_property_free_dir(dst);
tb_property_free_dir(src);
}
static struct kunit_case tb_test_cases[] = { static struct kunit_case tb_test_cases[] = {
KUNIT_CASE(tb_test_path_basic), KUNIT_CASE(tb_test_path_basic),
KUNIT_CASE(tb_test_path_not_connected_walk), KUNIT_CASE(tb_test_path_not_connected_walk),
...@@ -1616,6 +2100,14 @@ static struct kunit_case tb_test_cases[] = { ...@@ -1616,6 +2100,14 @@ static struct kunit_case tb_test_cases[] = {
KUNIT_CASE(tb_test_tunnel_dp_max_length), KUNIT_CASE(tb_test_tunnel_dp_max_length),
KUNIT_CASE(tb_test_tunnel_port_on_path), KUNIT_CASE(tb_test_tunnel_port_on_path),
KUNIT_CASE(tb_test_tunnel_usb3), KUNIT_CASE(tb_test_tunnel_usb3),
KUNIT_CASE(tb_test_tunnel_dma),
KUNIT_CASE(tb_test_tunnel_dma_rx),
KUNIT_CASE(tb_test_tunnel_dma_tx),
KUNIT_CASE(tb_test_tunnel_dma_chain),
KUNIT_CASE(tb_test_tunnel_dma_match),
KUNIT_CASE(tb_test_property_parse),
KUNIT_CASE(tb_test_property_format),
KUNIT_CASE(tb_test_property_copy),
{ } { }
}; };
......
...@@ -794,24 +794,14 @@ static u32 tb_dma_credits(struct tb_port *nhi) ...@@ -794,24 +794,14 @@ static u32 tb_dma_credits(struct tb_port *nhi)
return min(max_credits, 13U); return min(max_credits, 13U);
} }
static int tb_dma_activate(struct tb_tunnel *tunnel, bool active) static void tb_dma_init_path(struct tb_path *path, unsigned int efc, u32 credits)
{
struct tb_port *nhi = tunnel->src_port;
u32 credits;
credits = active ? tb_dma_credits(nhi) : 0;
return tb_port_set_initial_credits(nhi, credits);
}
static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
unsigned int efc, u32 credits)
{ {
int i; int i;
path->egress_fc_enable = efc; path->egress_fc_enable = efc;
path->ingress_fc_enable = TB_PATH_ALL; path->ingress_fc_enable = TB_PATH_ALL;
path->egress_shared_buffer = TB_PATH_NONE; path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_shared_buffer = isb; path->ingress_shared_buffer = TB_PATH_NONE;
path->priority = 5; path->priority = 5;
path->weight = 1; path->weight = 1;
path->clear_fc = true; path->clear_fc = true;
...@@ -825,28 +815,28 @@ static void tb_dma_init_path(struct tb_path *path, unsigned int isb, ...@@ -825,28 +815,28 @@ static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
* @tb: Pointer to the domain structure * @tb: Pointer to the domain structure
* @nhi: Host controller port * @nhi: Host controller port
* @dst: Destination null port which the other domain is connected to * @dst: Destination null port which the other domain is connected to
* @transmit_ring: NHI ring number used to send packets towards the
* other domain. Set to %0 if TX path is not needed.
* @transmit_path: HopID used for transmitting packets * @transmit_path: HopID used for transmitting packets
* @receive_ring: NHI ring number used to receive packets from the * @transmit_ring: NHI ring number used to send packets towards the
* other domain. Set to %0 if RX path is not needed. * other domain. Set to %-1 if TX path is not needed.
* @receive_path: HopID used for receiving packets * @receive_path: HopID used for receiving packets
* @receive_ring: NHI ring number used to receive packets from the
* other domain. Set to %-1 if RX path is not needed.
* *
* Return: Returns a tb_tunnel on success or NULL on failure. * Return: Returns a tb_tunnel on success or NULL on failure.
*/ */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring, struct tb_port *dst, int transmit_path,
int transmit_path, int receive_ring, int transmit_ring, int receive_path,
int receive_path) int receive_ring)
{ {
struct tb_tunnel *tunnel; struct tb_tunnel *tunnel;
size_t npaths = 0, i = 0; size_t npaths = 0, i = 0;
struct tb_path *path; struct tb_path *path;
u32 credits; u32 credits;
if (receive_ring) if (receive_ring > 0)
npaths++; npaths++;
if (transmit_ring) if (transmit_ring > 0)
npaths++; npaths++;
if (WARN_ON(!npaths)) if (WARN_ON(!npaths))
...@@ -856,38 +846,96 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, ...@@ -856,38 +846,96 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
if (!tunnel) if (!tunnel)
return NULL; return NULL;
tunnel->activate = tb_dma_activate;
tunnel->src_port = nhi; tunnel->src_port = nhi;
tunnel->dst_port = dst; tunnel->dst_port = dst;
credits = tb_dma_credits(nhi); credits = tb_dma_credits(nhi);
if (receive_ring) { if (receive_ring > 0) {
path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
"DMA RX"); "DMA RX");
if (!path) { if (!path) {
tb_tunnel_free(tunnel); tb_tunnel_free(tunnel);
return NULL; return NULL;
} }
tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL, tb_dma_init_path(path, TB_PATH_SOURCE | TB_PATH_INTERNAL, credits);
credits);
tunnel->paths[i++] = path; tunnel->paths[i++] = path;
} }
if (transmit_ring) { if (transmit_ring > 0) {
path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
"DMA TX"); "DMA TX");
if (!path) { if (!path) {
tb_tunnel_free(tunnel); tb_tunnel_free(tunnel);
return NULL; return NULL;
} }
tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits); tb_dma_init_path(path, TB_PATH_ALL, credits);
tunnel->paths[i++] = path; tunnel->paths[i++] = path;
} }
return tunnel; return tunnel;
} }
/**
* tb_tunnel_match_dma() - Match DMA tunnel
* @tunnel: Tunnel to match
* @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
* @transmit_ring: NHI ring number used to send packets towards the
* other domain. Pass %-1 to ignore.
* @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
* @receive_ring: NHI ring number used to receive packets from the
* other domain. Pass %-1 to ignore.
*
* This function can be used to match specific DMA tunnel, if there are
* multiple DMA tunnels going through the same XDomain connection.
* Returns true if there is match and false otherwise.
*/
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
int transmit_ring, int receive_path, int receive_ring)
{
const struct tb_path *tx_path = NULL, *rx_path = NULL;
int i;
if (!receive_ring || !transmit_ring)
return false;
for (i = 0; i < tunnel->npaths; i++) {
const struct tb_path *path = tunnel->paths[i];
if (!path)
continue;
if (tb_port_is_nhi(path->hops[0].in_port))
tx_path = path;
else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
rx_path = path;
}
if (transmit_ring > 0 || transmit_path > 0) {
if (!tx_path)
return false;
if (transmit_ring > 0 &&
(tx_path->hops[0].in_hop_index != transmit_ring))
return false;
if (transmit_path > 0 &&
(tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
return false;
}
if (receive_ring > 0 || receive_path > 0) {
if (!rx_path)
return false;
if (receive_path > 0 &&
(rx_path->hops[0].in_hop_index != receive_path))
return false;
if (receive_ring > 0 &&
(rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
return false;
}
return true;
}
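When several DMA tunnels run over the same XDomain connection, a caller that keeps its tunnels on a list can use the matcher to pick out the one it is about to tear down. A minimal sketch, assuming a caller-owned list of struct tb_tunnel and the existing tb_tunnel_is_dma() helper; the function name is hypothetical:

	static struct tb_tunnel *example_find_dma_tunnel(struct list_head *tunnels,
							 int transmit_path, int transmit_ring,
							 int receive_path, int receive_ring)
	{
		struct tb_tunnel *tunnel;

		list_for_each_entry(tunnel, tunnels, list) {
			if (!tb_tunnel_is_dma(tunnel))
				continue;
			/* Passing -1 for any argument means "don't care" */
			if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
						receive_path, receive_ring))
				return tunnel;
		}
		return NULL;
	}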
static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down) static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{ {
int ret, up_max_rate, down_max_rate; int ret, up_max_rate, down_max_rate;
......
...@@ -70,9 +70,11 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, ...@@ -70,9 +70,11 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int max_up, struct tb_port *out, int max_up,
int max_down); int max_down);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring, struct tb_port *dst, int transmit_path,
int transmit_path, int receive_ring, int transmit_ring, int receive_path,
int receive_path); int receive_ring);
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
int transmit_ring, int receive_path, int receive_ring);
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down); struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
struct tb_port *down, int max_up, struct tb_port *down, int max_up,
......
...@@ -12,17 +12,19 @@ ...@@ -12,17 +12,19 @@
#include <linux/kmod.h> #include <linux/kmod.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/utsname.h> #include <linux/utsname.h>
#include <linux/uuid.h> #include <linux/uuid.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include "tb.h" #include "tb.h"
#define XDOMAIN_DEFAULT_TIMEOUT 5000 /* ms */ #define XDOMAIN_DEFAULT_TIMEOUT 1000 /* ms */
#define XDOMAIN_UUID_RETRIES 10 #define XDOMAIN_UUID_RETRIES 10
#define XDOMAIN_PROPERTIES_RETRIES 60 #define XDOMAIN_PROPERTIES_RETRIES 10
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10 #define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10
#define XDOMAIN_BONDING_WAIT 100 /* ms */ #define XDOMAIN_BONDING_WAIT 100 /* ms */
#define XDOMAIN_DEFAULT_MAX_HOPID 15
struct xdomain_request_work { struct xdomain_request_work {
struct work_struct work; struct work_struct work;
...@@ -34,13 +36,15 @@ static bool tb_xdomain_enabled = true; ...@@ -34,13 +36,15 @@ static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444); module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)"); MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");
/* Serializes access to the properties and protocol handlers below */ /*
* Serializes access to the properties and protocol handlers below. If
* you need to take both this lock and the struct tb_xdomain lock, take
* this one first.
*/
static DEFINE_MUTEX(xdomain_lock); static DEFINE_MUTEX(xdomain_lock);
/* Properties exposed to the remote domains */ /* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir; static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen; static u32 xdomain_property_block_gen;
/* Additional protocol handlers */ /* Additional protocol handlers */
...@@ -385,8 +389,7 @@ static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route, ...@@ -385,8 +389,7 @@ static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
} }
static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl, static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
u64 route, u8 sequence, const uuid_t *src_uuid, struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
const struct tb_xdp_properties *req)
{ {
struct tb_xdp_properties_response *res; struct tb_xdp_properties_response *res;
size_t total_size; size_t total_size;
...@@ -398,39 +401,39 @@ static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl, ...@@ -398,39 +401,39 @@ static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
* protocol supports forwarding, though, which we might add * protocol supports forwarding, though, which we might add
* support for later on. * support for later on.
*/ */
if (!uuid_equal(src_uuid, &req->dst_uuid)) { if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
tb_xdp_error_response(ctl, route, sequence, tb_xdp_error_response(ctl, xd->route, sequence,
ERROR_UNKNOWN_DOMAIN); ERROR_UNKNOWN_DOMAIN);
return 0; return 0;
} }
mutex_lock(&xdomain_lock); mutex_lock(&xd->lock);
if (req->offset >= xdomain_property_block_len) { if (req->offset >= xd->local_property_block_len) {
mutex_unlock(&xdomain_lock); mutex_unlock(&xd->lock);
return -EINVAL; return -EINVAL;
} }
len = xdomain_property_block_len - req->offset; len = xd->local_property_block_len - req->offset;
len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH); len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
total_size = sizeof(*res) + len * 4; total_size = sizeof(*res) + len * 4;
res = kzalloc(total_size, GFP_KERNEL); res = kzalloc(total_size, GFP_KERNEL);
if (!res) { if (!res) {
mutex_unlock(&xdomain_lock); mutex_unlock(&xd->lock);
return -ENOMEM; return -ENOMEM;
} }
tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE, tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
total_size); total_size);
res->generation = xdomain_property_block_gen; res->generation = xd->local_property_block_gen;
res->data_length = xdomain_property_block_len; res->data_length = xd->local_property_block_len;
res->offset = req->offset; res->offset = req->offset;
uuid_copy(&res->src_uuid, src_uuid); uuid_copy(&res->src_uuid, xd->local_uuid);
uuid_copy(&res->dst_uuid, &req->src_uuid); uuid_copy(&res->dst_uuid, &req->src_uuid);
memcpy(res->data, &xdomain_property_block[req->offset], len * 4); memcpy(res->data, &xd->local_property_block[req->offset], len * 4);
mutex_unlock(&xdomain_lock); mutex_unlock(&xd->lock);
ret = __tb_xdomain_response(ctl, res, total_size, ret = __tb_xdomain_response(ctl, res, total_size,
TB_CFG_PKG_XDOMAIN_RESP); TB_CFG_PKG_XDOMAIN_RESP);
...@@ -512,52 +515,63 @@ void tb_unregister_protocol_handler(struct tb_protocol_handler *handler) ...@@ -512,52 +515,63 @@ void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
} }
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler); EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
static int rebuild_property_block(void) static void update_property_block(struct tb_xdomain *xd)
{ {
u32 *block, len; mutex_lock(&xdomain_lock);
int ret; mutex_lock(&xd->lock);
/*
* If the local property block is not up-to-date, rebuild it now
* based on the global property template.
*/
if (!xd->local_property_block ||
xd->local_property_block_gen < xdomain_property_block_gen) {
struct tb_property_dir *dir;
int ret, block_len;
u32 *block;
ret = tb_property_format_dir(xdomain_property_dir, NULL, 0); dir = tb_property_copy_dir(xdomain_property_dir);
if (ret < 0) if (!dir) {
return ret; dev_warn(&xd->dev, "failed to copy properties\n");
goto out_unlock;
}
len = ret; /* Fill in non-static properties now */
tb_property_add_text(dir, "deviceid", utsname()->nodename);
tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);
block = kcalloc(len, sizeof(u32), GFP_KERNEL); ret = tb_property_format_dir(dir, NULL, 0);
if (!block) if (ret < 0) {
return -ENOMEM; dev_warn(&xd->dev, "local property block creation failed\n");
tb_property_free_dir(dir);
goto out_unlock;
}
block_len = ret;
block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
if (!block) {
tb_property_free_dir(dir);
goto out_unlock;
}
ret = tb_property_format_dir(xdomain_property_dir, block, len); ret = tb_property_format_dir(dir, block, block_len);
if (ret) { if (ret) {
dev_warn(&xd->dev, "property block generation failed\n");
tb_property_free_dir(dir);
kfree(block); kfree(block);
return ret; goto out_unlock;
} }
kfree(xdomain_property_block); tb_property_free_dir(dir);
xdomain_property_block = block; /* Release the previous block */
xdomain_property_block_len = len; kfree(xd->local_property_block);
xdomain_property_block_gen++; /* Assign new one */
xd->local_property_block = block;
return 0; xd->local_property_block_len = block_len;
} xd->local_property_block_gen = xdomain_property_block_gen;
static void finalize_property_block(void)
{
const struct tb_property *nodename;
/*
* On first XDomain connection we set up the system
* nodename. This is delayed here because userspace may not have it
* set when the driver is first probed.
*/
mutex_lock(&xdomain_lock);
nodename = tb_property_find(xdomain_property_dir, "deviceid",
TB_PROPERTY_TYPE_TEXT);
if (!nodename) {
tb_property_add_text(xdomain_property_dir, "deviceid",
utsname()->nodename);
rebuild_property_block();
} }
out_unlock:
mutex_unlock(&xd->lock);
mutex_unlock(&xdomain_lock); mutex_unlock(&xdomain_lock);
} }
...@@ -568,6 +582,7 @@ static void tb_xdp_handle_request(struct work_struct *work) ...@@ -568,6 +582,7 @@ static void tb_xdp_handle_request(struct work_struct *work)
const struct tb_xdomain_header *xhdr = &pkg->xd_hdr; const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
struct tb *tb = xw->tb; struct tb *tb = xw->tb;
struct tb_ctl *ctl = tb->ctl; struct tb_ctl *ctl = tb->ctl;
struct tb_xdomain *xd;
const uuid_t *uuid; const uuid_t *uuid;
int ret = 0; int ret = 0;
u32 sequence; u32 sequence;
...@@ -589,17 +604,21 @@ static void tb_xdp_handle_request(struct work_struct *work) ...@@ -589,17 +604,21 @@ static void tb_xdp_handle_request(struct work_struct *work)
goto out; goto out;
} }
finalize_property_block(); tb_dbg(tb, "%llx: received XDomain request %#x\n", route, pkg->type);
xd = tb_xdomain_find_by_route_locked(tb, route);
if (xd)
update_property_block(xd);
switch (pkg->type) { switch (pkg->type) {
case PROPERTIES_REQUEST: case PROPERTIES_REQUEST:
ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid, if (xd) {
ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
(const struct tb_xdp_properties *)pkg); (const struct tb_xdp_properties *)pkg);
}
break; break;
case PROPERTIES_CHANGED_REQUEST: { case PROPERTIES_CHANGED_REQUEST:
struct tb_xdomain *xd;
ret = tb_xdp_properties_changed_response(ctl, route, sequence); ret = tb_xdp_properties_changed_response(ctl, route, sequence);
/* /*
...@@ -607,17 +626,11 @@ static void tb_xdp_handle_request(struct work_struct *work) ...@@ -607,17 +626,11 @@ static void tb_xdp_handle_request(struct work_struct *work)
* the xdomain related to this connection as well in * the xdomain related to this connection as well in
* case there is a change in services it offers. * case there is a change in services it offers.
*/ */
xd = tb_xdomain_find_by_route_locked(tb, route); if (xd && device_is_registered(&xd->dev)) {
if (xd) {
if (device_is_registered(&xd->dev)) {
queue_delayed_work(tb->wq, &xd->get_properties_work, queue_delayed_work(tb->wq, &xd->get_properties_work,
msecs_to_jiffies(50)); msecs_to_jiffies(50));
} }
tb_xdomain_put(xd);
}
break; break;
}
case UUID_REQUEST_OLD: case UUID_REQUEST_OLD:
case UUID_REQUEST: case UUID_REQUEST:
...@@ -630,6 +643,8 @@ static void tb_xdp_handle_request(struct work_struct *work) ...@@ -630,6 +643,8 @@ static void tb_xdp_handle_request(struct work_struct *work)
break; break;
} }
tb_xdomain_put(xd);
if (ret) { if (ret) {
tb_warn(tb, "failed to send XDomain response for %#x\n", tb_warn(tb, "failed to send XDomain response for %#x\n",
pkg->type); pkg->type);
...@@ -811,7 +826,7 @@ static int remove_missing_service(struct device *dev, void *data) ...@@ -811,7 +826,7 @@ static int remove_missing_service(struct device *dev, void *data)
if (!svc) if (!svc)
return 0; return 0;
if (!tb_property_find(xd->properties, svc->key, if (!tb_property_find(xd->remote_properties, svc->key,
TB_PROPERTY_TYPE_DIRECTORY)) TB_PROPERTY_TYPE_DIRECTORY))
device_unregister(dev); device_unregister(dev);
...@@ -871,7 +886,7 @@ static void enumerate_services(struct tb_xdomain *xd) ...@@ -871,7 +886,7 @@ static void enumerate_services(struct tb_xdomain *xd)
device_for_each_child_reverse(&xd->dev, xd, remove_missing_service); device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);
/* Then re-enumerate properties creating new services as we go */ /* Then re-enumerate properties creating new services as we go */
tb_property_for_each(xd->properties, p) { tb_property_for_each(xd->remote_properties, p) {
if (p->type != TB_PROPERTY_TYPE_DIRECTORY) if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
continue; continue;
...@@ -928,6 +943,14 @@ static int populate_properties(struct tb_xdomain *xd, ...@@ -928,6 +943,14 @@ static int populate_properties(struct tb_xdomain *xd,
return -EINVAL; return -EINVAL;
xd->vendor = p->value.immediate; xd->vendor = p->value.immediate;
p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
/*
* USB4 inter-domain spec suggests using 15 as HopID if the
* other end does not announce it in a property. This is for
* TBT3 compatibility.
*/
xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;
kfree(xd->device_name); kfree(xd->device_name);
xd->device_name = NULL; xd->device_name = NULL;
kfree(xd->vendor_name); kfree(xd->vendor_name);
...@@ -944,19 +967,6 @@ static int populate_properties(struct tb_xdomain *xd, ...@@ -944,19 +967,6 @@ static int populate_properties(struct tb_xdomain *xd,
return 0; return 0;
} }
/* Called with @xd->lock held */
static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
{
if (!xd->resume)
return;
xd->resume = false;
if (xd->transmit_path) {
dev_dbg(&xd->dev, "re-establishing DMA path\n");
tb_domain_approve_xdomain_paths(xd->tb, xd);
}
}
static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd) static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{ {
return tb_to_switch(xd->dev.parent); return tb_to_switch(xd->dev.parent);
...@@ -1002,9 +1012,12 @@ static void tb_xdomain_get_uuid(struct work_struct *work) ...@@ -1002,9 +1012,12 @@ static void tb_xdomain_get_uuid(struct work_struct *work)
uuid_t uuid; uuid_t uuid;
int ret; int ret;
dev_dbg(&xd->dev, "requesting remote UUID\n");
ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid); ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
if (ret < 0) { if (ret < 0) {
if (xd->uuid_retries-- > 0) { if (xd->uuid_retries-- > 0) {
dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
queue_delayed_work(xd->tb->wq, &xd->get_uuid_work, queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
msecs_to_jiffies(100)); msecs_to_jiffies(100));
} else { } else {
...@@ -1013,6 +1026,8 @@ static void tb_xdomain_get_uuid(struct work_struct *work) ...@@ -1013,6 +1026,8 @@ static void tb_xdomain_get_uuid(struct work_struct *work)
return; return;
} }
dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);
if (uuid_equal(&uuid, xd->local_uuid)) if (uuid_equal(&uuid, xd->local_uuid))
dev_dbg(&xd->dev, "intra-domain loop detected\n"); dev_dbg(&xd->dev, "intra-domain loop detected\n");
...@@ -1052,11 +1067,15 @@ static void tb_xdomain_get_properties(struct work_struct *work) ...@@ -1052,11 +1067,15 @@ static void tb_xdomain_get_properties(struct work_struct *work)
u32 gen = 0; u32 gen = 0;
int ret; int ret;
dev_dbg(&xd->dev, "requesting remote properties\n");
ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid, ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
xd->remote_uuid, xd->properties_retries, xd->remote_uuid, xd->properties_retries,
&block, &gen); &block, &gen);
if (ret < 0) { if (ret < 0) {
if (xd->properties_retries-- > 0) { if (xd->properties_retries-- > 0) {
dev_dbg(&xd->dev,
"failed to request remote properties, retrying\n");
queue_delayed_work(xd->tb->wq, &xd->get_properties_work, queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
msecs_to_jiffies(1000)); msecs_to_jiffies(1000));
} else { } else {
...@@ -1073,16 +1092,8 @@ static void tb_xdomain_get_properties(struct work_struct *work) ...@@ -1073,16 +1092,8 @@ static void tb_xdomain_get_properties(struct work_struct *work)
mutex_lock(&xd->lock); mutex_lock(&xd->lock);
/* Only accept newer generation properties */ /* Only accept newer generation properties */
if (xd->properties && gen <= xd->property_block_gen) { if (xd->remote_properties && gen <= xd->remote_property_block_gen)
/*
* On resume it is likely that the properties block is
* not changed (unless the other end added or removed
* services). However, we need to make sure the existing
* DMA paths are restored properly.
*/
tb_xdomain_restore_paths(xd);
goto err_free_block; goto err_free_block;
}
dir = tb_property_parse_dir(block, ret); dir = tb_property_parse_dir(block, ret);
if (!dir) { if (!dir) {
...@@ -1097,18 +1108,16 @@ static void tb_xdomain_get_properties(struct work_struct *work) ...@@ -1097,18 +1108,16 @@ static void tb_xdomain_get_properties(struct work_struct *work)
} }
/* Release the existing one */ /* Release the existing one */
if (xd->properties) { if (xd->remote_properties) {
tb_property_free_dir(xd->properties); tb_property_free_dir(xd->remote_properties);
update = true; update = true;
} }
xd->properties = dir; xd->remote_properties = dir;
xd->property_block_gen = gen; xd->remote_property_block_gen = gen;
tb_xdomain_update_link_attributes(xd); tb_xdomain_update_link_attributes(xd);
tb_xdomain_restore_paths(xd);
mutex_unlock(&xd->lock); mutex_unlock(&xd->lock);
kfree(block); kfree(block);
...@@ -1123,6 +1132,11 @@ static void tb_xdomain_get_properties(struct work_struct *work) ...@@ -1123,6 +1132,11 @@ static void tb_xdomain_get_properties(struct work_struct *work)
dev_err(&xd->dev, "failed to add XDomain device\n"); dev_err(&xd->dev, "failed to add XDomain device\n");
return; return;
} }
dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
xd->vendor, xd->device);
if (xd->vendor_name && xd->device_name)
dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
xd->device_name);
} else { } else {
kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE); kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
} }
...@@ -1143,13 +1157,19 @@ static void tb_xdomain_properties_changed(struct work_struct *work) ...@@ -1143,13 +1157,19 @@ static void tb_xdomain_properties_changed(struct work_struct *work)
properties_changed_work.work); properties_changed_work.work);
int ret; int ret;
dev_dbg(&xd->dev, "sending properties changed notification\n");
ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route, ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
xd->properties_changed_retries, xd->local_uuid); xd->properties_changed_retries, xd->local_uuid);
if (ret) { if (ret) {
if (xd->properties_changed_retries-- > 0) if (xd->properties_changed_retries-- > 0) {
dev_dbg(&xd->dev,
"failed to send properties changed notification, retrying\n");
queue_delayed_work(xd->tb->wq, queue_delayed_work(xd->tb->wq,
&xd->properties_changed_work, &xd->properties_changed_work,
msecs_to_jiffies(1000)); msecs_to_jiffies(1000));
}
dev_err(&xd->dev, "failed to send properties changed notification\n");
return; return;
} }
...@@ -1180,6 +1200,15 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf) ...@@ -1180,6 +1200,15 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
} }
static DEVICE_ATTR_RO(device_name); static DEVICE_ATTR_RO(device_name);
static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
return sprintf(buf, "%d\n", xd->remote_max_hopid);
}
static DEVICE_ATTR_RO(maxhopid);
static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
char *buf) char *buf)
{ {
...@@ -1238,6 +1267,7 @@ static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL); ...@@ -1238,6 +1267,7 @@ static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
static struct attribute *xdomain_attrs[] = { static struct attribute *xdomain_attrs[] = {
&dev_attr_device.attr, &dev_attr_device.attr,
&dev_attr_device_name.attr, &dev_attr_device_name.attr,
&dev_attr_maxhopid.attr,
&dev_attr_rx_lanes.attr, &dev_attr_rx_lanes.attr,
&dev_attr_rx_speed.attr, &dev_attr_rx_speed.attr,
&dev_attr_tx_lanes.attr, &dev_attr_tx_lanes.attr,
...@@ -1263,7 +1293,10 @@ static void tb_xdomain_release(struct device *dev) ...@@ -1263,7 +1293,10 @@ static void tb_xdomain_release(struct device *dev)
put_device(xd->dev.parent); put_device(xd->dev.parent);
tb_property_free_dir(xd->properties); kfree(xd->local_property_block);
tb_property_free_dir(xd->remote_properties);
ida_destroy(&xd->out_hopids);
ida_destroy(&xd->in_hopids);
ida_destroy(&xd->service_ids); ida_destroy(&xd->service_ids);
kfree(xd->local_uuid); kfree(xd->local_uuid);
...@@ -1310,15 +1343,7 @@ static int __maybe_unused tb_xdomain_suspend(struct device *dev) ...@@ -1310,15 +1343,7 @@ static int __maybe_unused tb_xdomain_suspend(struct device *dev)
static int __maybe_unused tb_xdomain_resume(struct device *dev) static int __maybe_unused tb_xdomain_resume(struct device *dev)
{ {
struct tb_xdomain *xd = tb_to_xdomain(dev); start_handshake(tb_to_xdomain(dev));
/*
* Ask tb_xdomain_get_properties() restore any existing DMA
* paths after properties are re-read.
*/
xd->resume = true;
start_handshake(xd);
return 0; return 0;
} }
...@@ -1363,7 +1388,10 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent, ...@@ -1363,7 +1388,10 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
xd->tb = tb; xd->tb = tb;
xd->route = route; xd->route = route;
xd->local_max_hopid = down->config.max_in_hop_id;
ida_init(&xd->service_ids); ida_init(&xd->service_ids);
ida_init(&xd->in_hopids);
ida_init(&xd->out_hopids);
mutex_init(&xd->lock); mutex_init(&xd->lock);
INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid); INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties); INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
...@@ -1390,6 +1418,10 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent, ...@@ -1390,6 +1418,10 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
xd->dev.groups = xdomain_attr_groups; xd->dev.groups = xdomain_attr_groups;
dev_set_name(&xd->dev, "%u-%llx", tb->index, route); dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
if (remote_uuid)
dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);
/* /*
* This keeps the DMA powered on as long as we have active * This keeps the DMA powered on as long as we have active
* connection to another host. * connection to another host.
...@@ -1452,10 +1484,12 @@ void tb_xdomain_remove(struct tb_xdomain *xd) ...@@ -1452,10 +1484,12 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
pm_runtime_put_noidle(&xd->dev); pm_runtime_put_noidle(&xd->dev);
pm_runtime_set_suspended(&xd->dev); pm_runtime_set_suspended(&xd->dev);
if (!device_is_registered(&xd->dev)) if (!device_is_registered(&xd->dev)) {
put_device(&xd->dev); put_device(&xd->dev);
else } else {
dev_info(&xd->dev, "host disconnected\n");
device_unregister(&xd->dev); device_unregister(&xd->dev);
}
} }
/** /**
...@@ -1523,73 +1557,118 @@ void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd) ...@@ -1523,73 +1557,118 @@ void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable); EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
/** /**
* tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling
* @xd: XDomain connection * @xd: XDomain connection
* @transmit_path: HopID of the transmit path the other end is using to * @hopid: Preferred HopID or %-1 for next available
* send packets
* @transmit_ring: DMA ring used to receive packets from the other end
* @receive_path: HopID of the receive path the other end is using to
* receive packets
* @receive_ring: DMA ring used to send packets to the other end
* *
* The function enables DMA paths accordingly so that after successful * Returns allocated HopID or negative errno. Specifically returns
* return the caller can send and receive packets using high-speed DMA * %-ENOSPC if there are no more available HopIDs. Returned HopID is
* path. * guaranteed to be within range supported by the input lane adapter.
* * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
* Return: %0 in case of success and negative errno in case of error
*/ */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path, int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
u16 transmit_ring, u16 receive_path,
u16 receive_ring)
{ {
int ret; if (hopid < 0)
hopid = TB_PATH_MIN_HOPID;
if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
return -EINVAL;
mutex_lock(&xd->lock); return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
if (xd->transmit_path) { /**
ret = xd->transmit_path == transmit_path ? 0 : -EBUSY; * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
goto exit_unlock; * @xd: XDomain connection
} * @hopid: Preferred HopID or %-1 for next available
*
* Returns allocated HopID or negative errno. Specifically returns
* %-ENOSPC if there are no more available HopIDs. Returned HopID is
* guaranteed to be within range supported by the output lane adapter.
* Call tb_xdomain_release_out_hopid() to release the allocated HopID.
*/
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
{
if (hopid < 0)
hopid = TB_PATH_MIN_HOPID;
if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
return -EINVAL;
xd->transmit_path = transmit_path; return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
xd->transmit_ring = transmit_ring; GFP_KERNEL);
xd->receive_path = receive_path; }
xd->receive_ring = receive_ring; EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);
ret = tb_domain_approve_xdomain_paths(xd->tb, xd); /**
* tb_xdomain_release_in_hopid() - Release input HopID
* @xd: XDomain connection
* @hopid: HopID to release
*/
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
{
ida_free(&xd->in_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);
exit_unlock: /**
mutex_unlock(&xd->lock); * tb_xdomain_release_out_hopid() - Release output HopID
* @xd: XDomain connection
* @hopid: HopID to release
*/
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
{
ida_free(&xd->out_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
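A service driver would normally reserve one HopID per direction before programming its NHI rings. A minimal sketch built only from the helpers above; the function name and error handling are illustrative:

	static int example_reserve_hopids(struct tb_xdomain *xd, int *in_hopid,
					  int *out_hopid)
	{
		int ret;

		ret = tb_xdomain_alloc_in_hopid(xd, -1);	/* -1: next available */
		if (ret < 0)
			return ret;
		*in_hopid = ret;

		ret = tb_xdomain_alloc_out_hopid(xd, -1);
		if (ret < 0) {
			tb_xdomain_release_in_hopid(xd, *in_hopid);
			return ret;
		}
		*out_hopid = ret;

		return 0;
	}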
return ret; /**
* tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
* @xd: XDomain connection
* @transmit_path: HopID we are using to send out packets
* @transmit_ring: DMA ring used to send out packets
* @receive_path: HopID the other end is using to send packets to us
* @receive_ring: DMA ring used to receive packets from @receive_path
*
* The function enables DMA paths accordingly so that after successful
* return the caller can send and receive packets using high-speed DMA
* path. If a transmit or receive path is not needed, pass %-1 for those
* parameters.
*
* Return: %0 in case of success and negative errno in case of error
*/
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
int transmit_ring, int receive_path,
int receive_ring)
{
return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
transmit_ring, receive_path,
receive_ring);
} }
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths); EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
/** /**
* tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
* @xd: XDomain connection * @xd: XDomain connection
* @transmit_path: HopID we are using to send out packets
* @transmit_ring: DMA ring used to send out packets
* @receive_path: HopID the other end is using to send packets to us
* @receive_ring: DMA ring used to receive packets from @receive_path
* *
* This does the opposite of tb_xdomain_enable_paths(). After call to * This does the opposite of tb_xdomain_enable_paths(). After call to
* this the caller is not expected to use the rings anymore. * this the caller is not expected to use the rings anymore. Passing %-1
* as path/ring parameter means don't care. Normally the callers should
* pass the same values here as they do when paths are enabled.
* *
* Return: %0 in case of success and negative errno in case of error * Return: %0 in case of success and negative errno in case of error
*/ */
int tb_xdomain_disable_paths(struct tb_xdomain *xd) int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
int transmit_ring, int receive_path,
int receive_ring)
{ {
int ret = 0; return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
transmit_ring, receive_path,
mutex_lock(&xd->lock); receive_ring);
if (xd->transmit_path) {
xd->transmit_path = 0;
xd->transmit_ring = 0;
xd->receive_path = 0;
xd->receive_ring = 0;
ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
}
mutex_unlock(&xd->lock);
return ret;
} }
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths); EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
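Tying the two halves together, a service driver passes the HopIDs it negotiated with the remote side plus its local NHI ring numbers when enabling the DMA paths, and the same values again on teardown. A rough sketch with hypothetical names:

	static int example_start_dma(struct tb_xdomain *xd, int tx_path, int tx_ring,
				     int rx_path, int rx_ring)
	{
		/* Pass -1 for a direction the service does not use */
		return tb_xdomain_enable_paths(xd, tx_path, tx_ring, rx_path, rx_ring);
	}

	static void example_stop_dma(struct tb_xdomain *xd, int tx_path, int tx_ring,
				     int rx_path, int rx_ring)
	{
		/* Use the same values that were passed when the paths were enabled */
		tb_xdomain_disable_paths(xd, tx_path, tx_ring, rx_path, rx_ring);
	}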
...@@ -1826,11 +1905,7 @@ int tb_register_property_dir(const char *key, struct tb_property_dir *dir) ...@@ -1826,11 +1905,7 @@ int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
if (ret) if (ret)
goto err_unlock; goto err_unlock;
ret = rebuild_property_block(); xdomain_property_block_gen++;
if (ret) {
remove_directory(key, dir);
goto err_unlock;
}
mutex_unlock(&xdomain_lock); mutex_unlock(&xdomain_lock);
update_all_xdomains(); update_all_xdomains();
...@@ -1856,7 +1931,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir) ...@@ -1856,7 +1931,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
mutex_lock(&xdomain_lock); mutex_lock(&xdomain_lock);
if (remove_directory(key, dir)) if (remove_directory(key, dir))
ret = rebuild_property_block(); xdomain_property_block_gen++;
mutex_unlock(&xdomain_lock); mutex_unlock(&xdomain_lock);
if (!ret) if (!ret)
...@@ -1875,7 +1950,8 @@ int tb_xdomain_init(void) ...@@ -1875,7 +1950,8 @@ int tb_xdomain_init(void)
* directories. Those will be added by service drivers * directories. Those will be added by service drivers
* themselves when they are loaded. * themselves when they are loaded.
* *
* We also add node name later when first connection is made. * Rest of the properties are filled dynamically based on these
* when the P2P connection is made.
*/ */
tb_property_add_immediate(xdomain_property_dir, "vendorid", tb_property_add_immediate(xdomain_property_dir, "vendorid",
PCI_VENDOR_ID_INTEL); PCI_VENDOR_ID_INTEL);
...@@ -1883,11 +1959,11 @@ int tb_xdomain_init(void)
tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1); tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100); tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
xdomain_property_block_gen = prandom_u32();
return 0; return 0;
} }
void tb_xdomain_exit(void)
{
	kfree(xdomain_property_block);
	tb_property_free_dir(xdomain_property_dir);
}
...@@ -146,6 +146,7 @@ struct tb_property_dir *tb_property_parse_dir(const u32 *block,
				  size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
			       size_t block_len);
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
...@@ -179,23 +180,24 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
 * @route: Route string the other domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
* @local_max_hopid: Maximum input HopID of this host
* @remote_max_hopid: Maximum input HopID of the remote host
 * @lock: Lock to serialize access to the following fields of this structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the link (1 or 2)
 * @is_unplugged: The XDomain is unplugged
* @resume: The XDomain is being resumed
 * @needs_uuid: If the XDomain does not have @remote_uuid it will be
 *		queried first
* @transmit_path: HopID which the remote end expects us to transmit
* @transmit_ring: Local ring (hop) where outgoing packets are pushed
* @receive_path: HopID which we expect the remote end to transmit
* @receive_ring: Local ring (hop) where incoming packets arrive
 * @service_ids: Used to generate IDs for the services
 * @properties: Properties exported by the remote domain
 * @property_block_gen: Generation of @properties
 * @properties_lock: Lock protecting @properties.
 * @in_hopids: Input HopIDs for DMA tunneling
 * @out_hopids: Output HopIDs for DMA tunneling
 * @local_property_block: Local block of properties
* @local_property_block_gen: Generation of @local_property_block
* @local_property_block_len: Length of the @local_property_block in dwords
* @remote_properties: Properties exported by the remote domain
* @remote_property_block_gen: Generation of @remote_properties
 * @get_uuid_work: Work used to retrieve @remote_uuid
 * @uuid_retries: Number of times left @remote_uuid is requested before
 *		  giving up
...@@ -225,21 +227,23 @@ struct tb_xdomain {
	u64 route;
	u16 vendor;
	u16 device;
unsigned int local_max_hopid;
unsigned int remote_max_hopid;
	struct mutex lock;
	const char *vendor_name;
	const char *device_name;
	unsigned int link_speed;
	unsigned int link_width;
	bool is_unplugged;
bool resume;
	bool needs_uuid;
u16 transmit_path;
u16 transmit_ring;
u16 receive_path;
u16 receive_ring;
	struct ida service_ids;
	struct tb_property_dir *properties;
	u32 property_block_gen;
	struct ida in_hopids;
	struct ida out_hopids;
u32 *local_property_block;
u32 local_property_block_gen;
u32 local_property_block_len;
struct tb_property_dir *remote_properties;
u32 remote_property_block_gen;
	struct delayed_work get_uuid_work;
	int uuid_retries;
	struct delayed_work get_properties_work;
...@@ -252,10 +256,22 @@ struct tb_xdomain {
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd);
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
int transmit_ring, int receive_path,
int receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
int transmit_ring, int receive_path,
int receive_ring);
static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd)
{
return tb_xdomain_disable_paths(xd, -1, -1, -1, -1);
}
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
...
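The header now also exports per-direction HopID allocators and the tb_xdomain_disable_all_paths() convenience helper. A short hedged sketch of the intended usage follows; the wrapper names are invented, and the allocator semantics (return value is the allocated HopID or a negative errno, -1 asks for any free HopID) are an assumption based on how the networking driver aligned in this same series uses them.

#include <linux/thunderbolt.h>

static int my_reserve_receive_hopid(struct tb_xdomain *xd)
{
	/*
	 * Reserve an input HopID on the local side; the remote end has to
	 * use this HopID when it sends packets to us. Assumption: -1 means
	 * "any free HopID" and the allocated HopID is returned.
	 */
	return tb_xdomain_alloc_in_hopid(xd, -1);
}

static void my_teardown_on_unplug(struct tb_xdomain *xd, int receive_hopid)
{
	/*
	 * One call drops every DMA tunnel of this XDomain connection; the
	 * inline helper expands to tb_xdomain_disable_paths(xd, -1, -1, -1, -1),
	 * i.e. "don't care" for every path and ring.
	 */
	tb_xdomain_disable_all_paths(xd);
	tb_xdomain_release_in_hopid(xd, receive_hopid);
}

The outgoing direction has the matching tb_xdomain_alloc_out_hopid()/tb_xdomain_release_out_hopid() pair; which HopIDs are exchanged with the remote host is left to each service driver's own protocol.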