Commit 57c2af79 authored by Dave Airlie

Merge tag 'topic/mst-suspend-resume-reprobe-2019-10-29-2' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
* Handle UP requests asynchronously in the DP MST helpers, fixing
  hotplug notifications and allowing us to implement suspend/resume
  reprobing
* Add basic suspend/resume reprobing to the DP MST helpers
* Improve locking for link address reprobing and connection status
  request handling in the DP MST helpers
* Miscellaneous refactoring in the DP MST helpers
* Add a Kconfig option to the DP MST helpers to enable tracking of
  gets/puts for topology references for debugging purposes

Driver Changes:
* nouveau: Resume hotplug interrupts earlier, so that sideband
  messages may be transmitted during resume and thus allow
  suspend/resume reprobing for DP MST to work
* nouveau: Avoid grabbing runtime PM references when handling short DP
  pulses, so that handling sideband messages in resume codepaths with the
  DP MST helpers doesn't deadlock us
* i915, nouveau, amdgpu, radeon: Use detect_ctx for probing MST
  connectors, so that we can grab the topology manager's atomic lock

Note: there's some amdgpu patches that I didn't realize were pushed
upstream already when creating this topic branch. When they fail to
apply, you can just ignore and skip them.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Lyude Paul <lyude@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/a74c6446bc960190d195a751cb6d8a00a98f3974.camel@redhat.com
parents 8c84b43f 12a280c7
...@@ -93,6 +93,20 @@ config DRM_KMS_FB_HELPER ...@@ -93,6 +93,20 @@ config DRM_KMS_FB_HELPER
help help
FBDEV helpers for KMS drivers. FBDEV helpers for KMS drivers.
config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
select STACKDEPOT
depends on DRM_KMS_HELPER
depends on DEBUG_KERNEL
depends on EXPERT
help
Enables debug tracing for topology refs in DRM's DP MST helpers. A
history of each topology reference/dereference will be printed to the
kernel log once a port or branch device's topology refcount reaches 0.
This has the potential to use a lot of memory and print some very
large kernel messages. If in doubt, say "N".
config DRM_FBDEV_EMULATION config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver" bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM depends on DRM
......
...@@ -1028,7 +1028,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) ...@@ -1028,7 +1028,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (suspend) { if (suspend) {
drm_dp_mst_topology_mgr_suspend(mgr); drm_dp_mst_topology_mgr_suspend(mgr);
} else { } else {
ret = drm_dp_mst_topology_mgr_resume(mgr); ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) { if (ret < 0) {
drm_dp_mst_topology_mgr_set_mst(mgr, false); drm_dp_mst_topology_mgr_set_mst(mgr, false);
need_hotplug = true; need_hotplug = true;
......
...@@ -123,21 +123,6 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, ...@@ -123,21 +123,6 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
return result; return result;
} }
static enum drm_connector_status
dm_dp_mst_detect(struct drm_connector *connector, bool force)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct amdgpu_dm_connector *master = aconnector->mst_port;
enum drm_connector_status status =
drm_dp_mst_detect_port(
connector,
&master->mst_mgr,
aconnector->port);
return status;
}
static void static void
dm_dp_mst_connector_destroy(struct drm_connector *connector) dm_dp_mst_connector_destroy(struct drm_connector *connector)
{ {
...@@ -175,7 +160,6 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) ...@@ -175,7 +160,6 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
} }
static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.detect = dm_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes, .fill_modes = drm_helper_probe_single_connector_modes,
.destroy = dm_dp_mst_connector_destroy, .destroy = dm_dp_mst_connector_destroy,
.reset = amdgpu_dm_connector_funcs_reset, .reset = amdgpu_dm_connector_funcs_reset,
...@@ -250,10 +234,22 @@ dm_mst_atomic_best_encoder(struct drm_connector *connector, ...@@ -250,10 +234,22 @@ dm_mst_atomic_best_encoder(struct drm_connector *connector,
return &to_amdgpu_dm_connector(connector)->mst_encoder->base; return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
} }
static int
dm_dp_mst_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct amdgpu_dm_connector *master = aconnector->mst_port;
return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
aconnector->port);
}
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
.get_modes = dm_dp_mst_get_modes, .get_modes = dm_dp_mst_get_modes,
.mode_valid = amdgpu_dm_connector_mode_valid, .mode_valid = amdgpu_dm_connector_mode_valid,
.atomic_best_encoder = dm_mst_atomic_best_encoder, .atomic_best_encoder = dm_mst_atomic_best_encoder,
.detect_ctx = dm_dp_mst_detect,
}; };
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
......
...@@ -28,6 +28,13 @@ ...@@ -28,6 +28,13 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif
#include <drm/drm_atomic.h> #include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h> #include <drm/drm_dp_mst_helper.h>
...@@ -45,6 +52,12 @@ ...@@ -45,6 +52,12 @@
* protocol. The helpers contain a topology manager and bandwidth manager. * protocol. The helpers contain a topology manager and bandwidth manager.
* The helpers encapsulate the sending and received of sideband msgs. * The helpers encapsulate the sending and received of sideband msgs.
*/ */
struct drm_dp_pending_up_req {
struct drm_dp_sideband_msg_hdr hdr;
struct drm_dp_sideband_msg_req_body msg;
struct list_head next;
};
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf); char *buf);
...@@ -61,7 +74,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, ...@@ -61,7 +74,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes); int offset, int size, u8 *bytes);
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb); struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *mstb,
...@@ -1393,39 +1406,194 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port) ...@@ -1393,39 +1406,194 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
} }
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc); EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
static void drm_dp_destroy_mst_branch_device(struct kref *kref) #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#define STACK_DEPTH 8
static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_ref_history *history,
enum drm_dp_mst_topology_ref_type type)
{ {
struct drm_dp_mst_branch *mstb = struct drm_dp_mst_topology_ref_entry *entry = NULL;
container_of(kref, struct drm_dp_mst_branch, topology_kref); depot_stack_handle_t backtrace;
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; ulong stack_entries[STACK_DEPTH];
struct drm_dp_mst_port *port, *tmp; uint n;
bool wake_tx = false; int i;
mutex_lock(&mgr->lock); n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
list_for_each_entry_safe(port, tmp, &mstb->ports, next) { backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
list_del(&port->next); if (!backtrace)
drm_dp_mst_topology_put_port(port); return;
/* Try to find an existing entry for this backtrace */
for (i = 0; i < history->len; i++) {
if (history->entries[i].backtrace == backtrace) {
entry = &history->entries[i];
break;
}
} }
mutex_unlock(&mgr->lock);
/* drop any tx slots msg */ /* Otherwise add one */
mutex_lock(&mstb->mgr->qlock); if (!entry) {
if (mstb->tx_slots[0]) { struct drm_dp_mst_topology_ref_entry *new;
mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; int new_len = history->len + 1;
mstb->tx_slots[0] = NULL;
wake_tx = true; new = krealloc(history->entries, sizeof(*new) * new_len,
GFP_KERNEL);
if (!new)
return;
entry = &new[history->len];
history->len = new_len;
history->entries = new;
entry->backtrace = backtrace;
entry->type = type;
entry->count = 0;
} }
if (mstb->tx_slots[1]) { entry->count++;
mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; entry->ts_nsec = ktime_get_ns();
mstb->tx_slots[1] = NULL; }
wake_tx = true;
static int
topology_ref_history_cmp(const void *a, const void *b)
{
const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
if (entry_a->ts_nsec > entry_b->ts_nsec)
return 1;
else if (entry_a->ts_nsec < entry_b->ts_nsec)
return -1;
else
return 0;
}
static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
return "get";
else
return "put";
}
static void
__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
void *ptr, const char *type_str)
{
struct drm_printer p = drm_debug_printer(DBG_PREFIX);
char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
int i;
if (!buf)
return;
if (!history->len)
goto out;
/* First, sort the list so that it goes from oldest to newest
* reference entry
*/
sort(history->entries, history->len, sizeof(*history->entries),
topology_ref_history_cmp, NULL);
drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
type_str, ptr);
for (i = 0; i < history->len; i++) {
const struct drm_dp_mst_topology_ref_entry *entry =
&history->entries[i];
ulong *entries;
uint nr_entries;
u64 ts_nsec = entry->ts_nsec;
u64 rem_nsec = do_div(ts_nsec, 1000000000);
nr_entries = stack_depot_fetch(entry->backtrace, &entries);
stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
drm_printf(&p, " %d %ss (last at %5llu.%06llu):\n%s",
entry->count,
topology_ref_type_to_str(entry->type),
ts_nsec, rem_nsec / 1000, buf);
} }
mutex_unlock(&mstb->mgr->qlock);
if (wake_tx) /* Now free the history, since this is the only time we expose it */
wake_up_all(&mstb->mgr->tx_waitq); kfree(history->entries);
out:
kfree(buf);
}
drm_dp_mst_put_mstb_malloc(mstb); static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
"MSTB");
}
static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
__dump_topology_ref_history(&port->topology_ref_history, port,
"Port");
}
static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
enum drm_dp_mst_topology_ref_type type)
{
__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}
static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
enum drm_dp_mst_topology_ref_type type)
{
__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->topology_ref_history_lock);
}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb =
container_of(kref, struct drm_dp_mst_branch, topology_kref);
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
drm_dp_mst_dump_mstb_topology_history(mstb);
INIT_LIST_HEAD(&mstb->destroy_next);
/*
* This can get called under mgr->mutex, so we need to perform the
* actual destruction of the mstb in another worker
*/
mutex_lock(&mgr->delayed_destroy_lock);
list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
mutex_unlock(&mgr->delayed_destroy_lock);
schedule_work(&mgr->delayed_destroy_work);
} }
/** /**
...@@ -1453,11 +1621,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) ...@@ -1453,11 +1621,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
static int __must_check static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{ {
int ret = kref_get_unless_zero(&mstb->topology_kref); int ret;
if (ret) topology_ref_history_lock(mstb->mgr);
DRM_DEBUG("mstb %p (%d)\n", mstb, ret = kref_get_unless_zero(&mstb->topology_kref);
kref_read(&mstb->topology_kref)); if (ret) {
DRM_DEBUG("mstb %p (%d)\n",
mstb, kref_read(&mstb->topology_kref));
save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
}
topology_ref_history_unlock(mstb->mgr);
return ret; return ret;
} }
...@@ -1478,9 +1652,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) ...@@ -1478,9 +1652,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
*/ */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{ {
topology_ref_history_lock(mstb->mgr);
save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
WARN_ON(kref_read(&mstb->topology_kref) == 0); WARN_ON(kref_read(&mstb->topology_kref) == 0);
kref_get(&mstb->topology_kref); kref_get(&mstb->topology_kref);
DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
topology_ref_history_unlock(mstb->mgr);
} }
/** /**
...@@ -1498,27 +1677,14 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) ...@@ -1498,27 +1677,14 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
static void static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{ {
topology_ref_history_lock(mstb->mgr);
DRM_DEBUG("mstb %p (%d)\n", DRM_DEBUG("mstb %p (%d)\n",
mstb, kref_read(&mstb->topology_kref) - 1); mstb, kref_read(&mstb->topology_kref) - 1);
kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
}
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
struct drm_dp_mst_branch *mstb;
switch (old_pdt) { topology_ref_history_unlock(mstb->mgr);
case DP_PEER_DEVICE_DP_LEGACY_CONV: kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
case DP_PEER_DEVICE_SST_SINK:
/* remove i2c over sideband */
drm_dp_mst_unregister_i2c_bus(&port->aux);
break;
case DP_PEER_DEVICE_MST_BRANCHING:
mstb = port->mstb;
port->mstb = NULL;
drm_dp_mst_topology_put_mstb(mstb);
break;
}
} }
static void drm_dp_destroy_port(struct kref *kref) static void drm_dp_destroy_port(struct kref *kref)
...@@ -1527,31 +1693,24 @@ static void drm_dp_destroy_port(struct kref *kref) ...@@ -1527,31 +1693,24 @@ static void drm_dp_destroy_port(struct kref *kref)
container_of(kref, struct drm_dp_mst_port, topology_kref); container_of(kref, struct drm_dp_mst_port, topology_kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr; struct drm_dp_mst_topology_mgr *mgr = port->mgr;
if (!port->input) { drm_dp_mst_dump_port_topology_history(port);
/* There's nothing that needs locking to destroy an input port yet */
if (port->input) {
drm_dp_mst_put_port_malloc(port);
return;
}
kfree(port->cached_edid); kfree(port->cached_edid);
/* /*
* The only time we don't have a connector * we can't destroy the connector here, as we might be holding the
* on an output port is if the connector init * mode_config.mutex from an EDID retrieval
* fails.
*/ */
if (port->connector) { mutex_lock(&mgr->delayed_destroy_lock);
/* we can't destroy the connector here, as list_add(&port->next, &mgr->destroy_port_list);
* we might be holding the mode_config.mutex mutex_unlock(&mgr->delayed_destroy_lock);
* from an EDID retrieval */ schedule_work(&mgr->delayed_destroy_work);
mutex_lock(&mgr->destroy_connector_lock);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
return;
}
/* no need to clean up vcpi
* as if we have no connector we never setup a vcpi */
drm_dp_port_teardown_pdt(port, port->pdt);
port->pdt = DP_PEER_DEVICE_NONE;
}
drm_dp_mst_put_port_malloc(port);
} }
/** /**
...@@ -1579,12 +1738,17 @@ static void drm_dp_destroy_port(struct kref *kref) ...@@ -1579,12 +1738,17 @@ static void drm_dp_destroy_port(struct kref *kref)
static int __must_check static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{ {
int ret = kref_get_unless_zero(&port->topology_kref); int ret;
if (ret) topology_ref_history_lock(port->mgr);
DRM_DEBUG("port %p (%d)\n", port, ret = kref_get_unless_zero(&port->topology_kref);
kref_read(&port->topology_kref)); if (ret) {
DRM_DEBUG("port %p (%d)\n",
port, kref_read(&port->topology_kref));
save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
}
topology_ref_history_unlock(port->mgr);
return ret; return ret;
} }
...@@ -1603,9 +1767,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) ...@@ -1603,9 +1767,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
*/ */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{ {
topology_ref_history_lock(port->mgr);
WARN_ON(kref_read(&port->topology_kref) == 0); WARN_ON(kref_read(&port->topology_kref) == 0);
kref_get(&port->topology_kref); kref_get(&port->topology_kref);
DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref)); DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
topology_ref_history_unlock(port->mgr);
} }
/** /**
...@@ -1621,8 +1790,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) ...@@ -1621,8 +1790,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
*/ */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{ {
topology_ref_history_lock(port->mgr);
DRM_DEBUG("port %p (%d)\n", DRM_DEBUG("port %p (%d)\n",
port, kref_read(&port->topology_kref) - 1); port, kref_read(&port->topology_kref) - 1);
save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
topology_ref_history_unlock(port->mgr);
kref_put(&port->topology_kref, drm_dp_destroy_port); kref_put(&port->topology_kref, drm_dp_destroy_port);
} }
...@@ -1739,38 +1913,79 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, ...@@ -1739,38 +1913,79 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1; return parent_lct + 1;
} }
/* static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
* return sends link address for new mstb
*/
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{ {
int ret; struct drm_dp_mst_topology_mgr *mgr = port->mgr;
u8 rad[6], lct; struct drm_dp_mst_branch *mstb;
bool send_link = false; u8 rad[8], lct;
int ret = 0;
if (port->pdt == new_pdt)
return 0;
/* Teardown the old pdt, if there is one */
switch (port->pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
/*
* If the new PDT would also have an i2c bus, don't bother
* with reregistering it
*/
if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
new_pdt == DP_PEER_DEVICE_SST_SINK) {
port->pdt = new_pdt;
return 0;
}
/* remove i2c over sideband */
drm_dp_mst_unregister_i2c_bus(&port->aux);
break;
case DP_PEER_DEVICE_MST_BRANCHING:
mutex_lock(&mgr->lock);
drm_dp_mst_topology_put_mstb(port->mstb);
port->mstb = NULL;
mutex_unlock(&mgr->lock);
break;
}
port->pdt = new_pdt;
switch (port->pdt) { switch (port->pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV: case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK: case DP_PEER_DEVICE_SST_SINK:
/* add i2c over sideband */ /* add i2c over sideband */
ret = drm_dp_mst_register_i2c_bus(&port->aux); ret = drm_dp_mst_register_i2c_bus(&port->aux);
break; break;
case DP_PEER_DEVICE_MST_BRANCHING: case DP_PEER_DEVICE_MST_BRANCHING:
lct = drm_dp_calculate_rad(port, rad); lct = drm_dp_calculate_rad(port, rad);
mstb = drm_dp_add_mst_branch_device(lct, rad);
if (!mstb) {
ret = -ENOMEM;
DRM_ERROR("Failed to create MSTB for port %p", port);
goto out;
}
mutex_lock(&mgr->lock);
port->mstb = mstb;
mstb->mgr = port->mgr;
mstb->port_parent = port;
port->mstb = drm_dp_add_mst_branch_device(lct, rad);
if (port->mstb) {
port->mstb->mgr = port->mgr;
port->mstb->port_parent = port;
/* /*
* Make sure this port's memory allocation stays * Make sure this port's memory allocation stays
* around until its child MSTB releases it * around until its child MSTB releases it
*/ */
drm_dp_mst_get_port_malloc(port); drm_dp_mst_get_port_malloc(port);
mutex_unlock(&mgr->lock);
send_link = true; /* And make sure we send a link address for this */
} ret = 1;
break; break;
} }
return send_link;
out:
if (ret < 0)
port->pdt = DP_PEER_DEVICE_NONE;
return ret;
} }
/** /**
...@@ -1903,26 +2118,64 @@ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, ...@@ -1903,26 +2118,64 @@ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister); EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
static void static void
drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
struct drm_device *dev, struct drm_dp_mst_port *port)
struct drm_dp_link_addr_reply_port *port_msg)
{ {
struct drm_dp_mst_port *port; struct drm_dp_mst_topology_mgr *mgr = port->mgr;
bool ret; char proppath[255];
bool created = false; int ret;
int old_pdt = 0;
int old_ddps = 0;
port = drm_dp_get_port(mstb, port_msg->port_number); build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
if (!port) { port->connector = mgr->cbs->add_connector(mgr, port, proppath);
port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port->connector) {
if (!port) ret = -ENOMEM;
goto error;
}
if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
port->pdt == DP_PEER_DEVICE_SST_SINK) &&
port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
}
mgr->cbs->register_connector(port->connector);
return; return;
error:
DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
}
/*
* Drop a topology reference, and unlink the port from the in-memory topology
* layout
*/
static void
drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
mutex_lock(&mgr->lock);
list_del(&port->next);
mutex_unlock(&mgr->lock);
drm_dp_mst_topology_put_port(port);
}
static struct drm_dp_mst_port *
drm_dp_mst_add_port(struct drm_device *dev,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb, u8 port_number)
{
struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return NULL;
kref_init(&port->topology_kref); kref_init(&port->topology_kref);
kref_init(&port->malloc_kref); kref_init(&port->malloc_kref);
port->parent = mstb; port->parent = mstb;
port->port_num = port_msg->port_number; port->port_num = port_number;
port->mgr = mstb->mgr; port->mgr = mgr;
port->aux.name = "DPMST"; port->aux.name = "DPMST";
port->aux.dev = dev->dev; port->aux.dev = dev->dev;
port->aux.is_remote = true; port->aux.is_remote = true;
...@@ -1931,16 +2184,64 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, ...@@ -1931,16 +2184,64 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
* Make sure the memory allocation for our parent branch stays * Make sure the memory allocation for our parent branch stays
* around until our own memory allocation is released * around until our own memory allocation is released
*/ */
drm_dp_mst_get_mstb_malloc(mstb); drm_dp_mst_get_mstb_malloc(mstb);
return port;
}
static int
drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
struct drm_device *dev,
struct drm_dp_link_addr_reply_port *port_msg)
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
int old_ddps = 0, ret;
u8 new_pdt = DP_PEER_DEVICE_NONE;
bool created = false, send_link_addr = false, changed = false;
port = drm_dp_get_port(mstb, port_msg->port_number);
if (!port) {
port = drm_dp_mst_add_port(dev, mgr, mstb,
port_msg->port_number);
if (!port)
return -ENOMEM;
created = true;
changed = true;
} else if (!port->input && port_msg->input_port && port->connector) {
/* Since port->connector can't be changed here, we create a
* new port if input_port changes from 0 to 1
*/
drm_dp_mst_topology_unlink_port(mgr, port);
drm_dp_mst_topology_put_port(port);
port = drm_dp_mst_add_port(dev, mgr, mstb,
port_msg->port_number);
if (!port)
return -ENOMEM;
changed = true;
created = true;
} else if (port->input && !port_msg->input_port) {
changed = true;
} else if (port->connector) {
/* We're updating a port that's exposed to userspace, so do it
* under lock
*/
drm_modeset_lock(&mgr->base.lock, NULL);
created = true;
} else {
old_pdt = port->pdt;
old_ddps = port->ddps; old_ddps = port->ddps;
changed = port->ddps != port_msg->ddps ||
(port->ddps &&
(port->ldps != port_msg->legacy_device_plug_status ||
port->dpcd_rev != port_msg->dpcd_revision ||
port->mcs != port_msg->mcs ||
port->pdt != port_msg->peer_device_type ||
port->num_sdp_stream_sinks !=
port_msg->num_sdp_stream_sinks));
} }
port->pdt = port_msg->peer_device_type;
port->input = port_msg->input_port; port->input = port_msg->input_port;
if (!port->input)
new_pdt = port_msg->peer_device_type;
port->mcs = port_msg->mcs; port->mcs = port_msg->mcs;
port->ddps = port_msg->ddps; port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status; port->ldps = port_msg->legacy_device_plug_status;
...@@ -1951,78 +2252,104 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, ...@@ -1951,78 +2252,104 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
/* manage mstb port lists with mgr lock - take a reference /* manage mstb port lists with mgr lock - take a reference
for this list */ for this list */
if (created) { if (created) {
mutex_lock(&mstb->mgr->lock); mutex_lock(&mgr->lock);
drm_dp_mst_topology_get_port(port); drm_dp_mst_topology_get_port(port);
list_add(&port->next, &mstb->ports); list_add(&port->next, &mstb->ports);
mutex_unlock(&mstb->mgr->lock); mutex_unlock(&mgr->lock);
} }
if (old_ddps != port->ddps) { if (old_ddps != port->ddps) {
if (port->ddps) { if (port->ddps) {
if (!port->input) { if (!port->input) {
drm_dp_send_enum_path_resources(mstb->mgr, drm_dp_send_enum_path_resources(mgr, mstb,
mstb, port); port);
} }
} else { } else {
port->available_pbn = 0; port->available_pbn = 0;
} }
} }
if (old_pdt != port->pdt && !port->input) { ret = drm_dp_port_set_pdt(port, new_pdt);
drm_dp_port_teardown_pdt(port, old_pdt); if (ret == 1) {
send_link_addr = true;
ret = drm_dp_port_setup_pdt(port); } else if (ret < 0) {
if (ret == true) DRM_ERROR("Failed to change PDT on port %p: %d\n",
drm_dp_send_link_address(mstb->mgr, port->mstb); port, ret);
goto fail;
} }
if (created && !port->input) { /*
char proppath[255]; * If this port wasn't just created, then we're reprobing because
* we're coming out of suspend. In this case, always resend the link
build_mst_prop_path(mstb, port->port_num, proppath, * address if there's an MSTB on this port
sizeof(proppath)); */
port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
port, send_link_addr = true;
proppath);
if (!port->connector) { if (port->connector)
/* remove it from the port list */ drm_modeset_unlock(&mgr->base.lock);
mutex_lock(&mstb->mgr->lock); else if (!port->input)
list_del(&port->next); drm_dp_mst_port_add_connector(mstb, port);
mutex_unlock(&mstb->mgr->lock);
/* drop port list reference */ if (send_link_addr && port->mstb) {
drm_dp_mst_topology_put_port(port); ret = drm_dp_send_link_address(mgr, port->mstb);
goto out; if (ret == 1) /* MSTB below us changed */
} changed = true;
if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || else if (ret < 0)
port->pdt == DP_PEER_DEVICE_SST_SINK) && goto fail_put;
port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
}
(*mstb->mgr->cbs->register_connector)(port->connector);
} }
out:
/* put reference to this port */ /* put reference to this port */
drm_dp_mst_topology_put_port(port); drm_dp_mst_topology_put_port(port);
return changed;
fail:
drm_dp_mst_topology_unlink_port(mgr, port);
if (port->connector)
drm_modeset_unlock(&mgr->base.lock);
fail_put:
drm_dp_mst_topology_put_port(port);
return ret;
} }
static void static void
drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
struct drm_dp_connection_status_notify *conn_stat) struct drm_dp_connection_status_notify *conn_stat)
{ {
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port; struct drm_dp_mst_port *port;
int old_pdt; int old_ddps, ret;
int old_ddps; u8 new_pdt;
bool dowork = false; bool dowork = false, create_connector = false;
port = drm_dp_get_port(mstb, conn_stat->port_number); port = drm_dp_get_port(mstb, conn_stat->port_number);
if (!port) if (!port)
return; return;
if (port->connector) {
if (!port->input && conn_stat->input_port) {
/*
* We can't remove a connector from an already exposed
* port, so just throw the port out and make sure we
* reprobe the link address of it's parent MSTB
*/
drm_dp_mst_topology_unlink_port(mgr, port);
mstb->link_address_sent = false;
dowork = true;
goto out;
}
/* Locking is only needed if the port's exposed to userspace */
drm_modeset_lock(&mgr->base.lock, NULL);
} else if (port->input && !conn_stat->input_port) {
create_connector = true;
/* Reprobe link address so we get num_sdp_streams */
mstb->link_address_sent = false;
dowork = true;
}
old_ddps = port->ddps; old_ddps = port->ddps;
old_pdt = port->pdt; port->input = conn_stat->input_port;
port->pdt = conn_stat->peer_device_type;
port->mcs = conn_stat->message_capability_status; port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status; port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status; port->ddps = conn_stat->displayport_device_plug_status;
...@@ -2034,17 +2361,27 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, ...@@ -2034,17 +2361,27 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
port->available_pbn = 0; port->available_pbn = 0;
} }
} }
if (old_pdt != port->pdt && !port->input) {
drm_dp_port_teardown_pdt(port, old_pdt);
if (drm_dp_port_setup_pdt(port)) new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
ret = drm_dp_port_set_pdt(port, new_pdt);
if (ret == 1) {
dowork = true; dowork = true;
} else if (ret < 0) {
DRM_ERROR("Failed to change PDT for port %p: %d\n",
port, ret);
dowork = false;
} }
if (port->connector)
drm_modeset_unlock(&mgr->base.lock);
else if (create_connector)
drm_dp_mst_port_add_connector(mstb, port);
out:
drm_dp_mst_topology_put_port(port); drm_dp_mst_topology_put_port(port);
if (dowork) if (dowork)
queue_work(system_long_wq, &mstb->mgr->work); queue_work(system_long_wq, &mstb->mgr->work);
} }
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
...@@ -2130,41 +2467,62 @@ drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, ...@@ -2130,41 +2467,62 @@ drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
return mstb; return mstb;
} }
static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb) struct drm_dp_mst_branch *mstb)
{ {
struct drm_dp_mst_port *port; struct drm_dp_mst_port *port;
struct drm_dp_mst_branch *mstb_child; int ret;
if (!mstb->link_address_sent) bool changed = false;
drm_dp_send_link_address(mgr, mstb);
if (!mstb->link_address_sent) {
ret = drm_dp_send_link_address(mgr, mstb);
if (ret == 1)
changed = true;
else if (ret < 0)
return ret;
}
list_for_each_entry(port, &mstb->ports, next) { list_for_each_entry(port, &mstb->ports, next) {
if (port->input) struct drm_dp_mst_branch *mstb_child = NULL;
continue;
if (!port->ddps) if (port->input || !port->ddps)
continue; continue;
if (!port->available_pbn) if (!port->available_pbn) {
drm_modeset_lock(&mgr->base.lock, NULL);
drm_dp_send_enum_path_resources(mgr, mstb, port); drm_dp_send_enum_path_resources(mgr, mstb, port);
drm_modeset_unlock(&mgr->base.lock);
changed = true;
}
if (port->mstb) { if (port->mstb)
mstb_child = drm_dp_mst_topology_get_mstb_validated( mstb_child = drm_dp_mst_topology_get_mstb_validated(
mgr, port->mstb); mgr, port->mstb);
if (mstb_child) { if (mstb_child) {
drm_dp_check_and_send_link_address(mgr, mstb_child); ret = drm_dp_check_and_send_link_address(mgr,
mstb_child);
drm_dp_mst_topology_put_mstb(mstb_child); drm_dp_mst_topology_put_mstb(mstb_child);
if (ret == 1)
changed = true;
else if (ret < 0)
return ret;
} }
} }
}
return changed;
} }
static void drm_dp_mst_link_probe_work(struct work_struct *work) static void drm_dp_mst_link_probe_work(struct work_struct *work)
{ {
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); struct drm_dp_mst_topology_mgr *mgr =
container_of(work, struct drm_dp_mst_topology_mgr, work);
struct drm_device *dev = mgr->dev;
struct drm_dp_mst_branch *mstb; struct drm_dp_mst_branch *mstb;
int ret; int ret;
mutex_lock(&mgr->probe_lock);
mutex_lock(&mgr->lock); mutex_lock(&mgr->lock);
mstb = mgr->mst_primary; mstb = mgr->mst_primary;
if (mstb) { if (mstb) {
...@@ -2173,10 +2531,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) ...@@ -2173,10 +2531,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
mstb = NULL; mstb = NULL;
} }
mutex_unlock(&mgr->lock); mutex_unlock(&mgr->lock);
if (mstb) { if (!mstb) {
drm_dp_check_and_send_link_address(mgr, mstb); mutex_unlock(&mgr->probe_lock);
drm_dp_mst_topology_put_mstb(mstb); return;
} }
ret = drm_dp_check_and_send_link_address(mgr, mstb);
drm_dp_mst_topology_put_mstb(mstb);
mutex_unlock(&mgr->probe_lock);
if (ret)
drm_kms_helper_hotplug_event(dev);
} }
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
...@@ -2422,16 +2787,18 @@ drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply) ...@@ -2422,16 +2787,18 @@ drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
} }
} }
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb) struct drm_dp_mst_branch *mstb)
{ {
struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_link_address_ack_reply *reply; struct drm_dp_link_address_ack_reply *reply;
int i, len, ret; struct drm_dp_mst_port *port, *tmp;
int i, len, ret, port_mask = 0;
bool changed = false;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) if (!txmsg)
return; return -ENOMEM;
txmsg->dst = mstb; txmsg->dst = mstb;
len = build_link_address(txmsg); len = build_link_address(txmsg);
...@@ -2457,16 +2824,39 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, ...@@ -2457,16 +2824,39 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_check_mstb_guid(mstb, reply->guid); drm_dp_check_mstb_guid(mstb, reply->guid);
for (i = 0; i < reply->nports; i++) for (i = 0; i < reply->nports; i++) {
drm_dp_mst_handle_link_address_port(mstb, mgr->dev, port_mask |= BIT(reply->ports[i].port_number);
ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
&reply->ports[i]); &reply->ports[i]);
if (ret == 1)
changed = true;
else if (ret < 0)
goto out;
}
drm_kms_helper_hotplug_event(mgr->dev); /* Prune any ports that are currently a part of mstb in our in-memory
* topology, but were not seen in this link address. Usually this
* means that they were removed while the topology was out of sync,
* e.g. during suspend/resume
*/
mutex_lock(&mgr->lock);
list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
if (port_mask & BIT(port->port_num))
continue;
DRM_DEBUG_KMS("port %d was not in link address, removing\n",
port->port_num);
list_del(&port->next);
drm_dp_mst_topology_put_port(port);
changed = true;
}
mutex_unlock(&mgr->lock);
out: out:
if (ret <= 0) if (ret <= 0)
mstb->link_address_sent = false; mstb->link_address_sent = false;
kfree(txmsg); kfree(txmsg);
return ret < 0 ? ret : changed;
} }
static int static int
...@@ -3071,6 +3461,23 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms ...@@ -3071,6 +3461,23 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
} }
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
static void
drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
/* The link address will need to be re-sent on resume */
mstb->link_address_sent = false;
list_for_each_entry(port, &mstb->ports, next) {
/* The PBN for each port will also need to be re-probed */
port->available_pbn = 0;
if (port->mstb)
drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
}
}
/** /**
* drm_dp_mst_topology_mgr_suspend() - suspend the MST manager * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
* @mgr: manager to suspend * @mgr: manager to suspend
...@@ -3084,62 +3491,89 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) ...@@ -3084,62 +3491,89 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC); DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock); mutex_unlock(&mgr->lock);
flush_work(&mgr->up_req_work);
flush_work(&mgr->work); flush_work(&mgr->work);
flush_work(&mgr->destroy_connector_work); flush_work(&mgr->delayed_destroy_work);
mutex_lock(&mgr->lock);
if (mgr->mst_state && mgr->mst_primary)
drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
mutex_unlock(&mgr->lock);
} }
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
/** /**
* drm_dp_mst_topology_mgr_resume() - resume the MST manager * drm_dp_mst_topology_mgr_resume() - resume the MST manager
* @mgr: manager to resume * @mgr: manager to resume
* @sync: whether or not to perform topology reprobing synchronously
* *
* This will fetch DPCD and see if the device is still there, * This will fetch DPCD and see if the device is still there,
* if it is, it will rewrite the MSTM control bits, and return. * if it is, it will rewrite the MSTM control bits, and return.
* *
* if the device fails this returns -1, and the driver should do * If the device fails this returns -1, and the driver should do
* a full MST reprobe, in case we were undocked. * a full MST reprobe, in case we were undocked.
*
* During system resume (where it is assumed that the driver will be calling
* drm_atomic_helper_resume()) this function should be called beforehand with
* @sync set to true. In contexts like runtime resume where the driver is not
* expected to be calling drm_atomic_helper_resume(), this function should be
* called with @sync set to false in order to avoid deadlocking.
*
* Returns: -1 if the MST topology was removed while we were suspended, 0
* otherwise.
*/ */
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
bool sync)
{ {
int ret = 0; int ret;
u8 guid[16];
mutex_lock(&mgr->lock); mutex_lock(&mgr->lock);
if (!mgr->mst_primary)
goto out_fail;
if (mgr->mst_primary) { ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
int sret; DP_RECEIVER_CAP_SIZE);
u8 guid[16]; if (ret != DP_RECEIVER_CAP_SIZE) {
sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (sret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
ret = -1; goto out_fail;
goto out_unlock;
} }
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); DP_MST_EN |
DP_UP_REQ_EN |
DP_UPSTREAM_IS_SRC);
if (ret < 0) { if (ret < 0) {
DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
ret = -1; goto out_fail;
goto out_unlock;
} }
/* Some hubs forget their guids after they resume */ /* Some hubs forget their guids after they resume */
sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
if (sret != 16) { if (ret != 16) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
ret = -1; goto out_fail;
goto out_unlock;
} }
drm_dp_check_mstb_guid(mgr->mst_primary, guid); drm_dp_check_mstb_guid(mgr->mst_primary, guid);
ret = 0; /*
} else * For the final step of resuming the topology, we need to bring the
ret = -1; * state of our in-memory topology back into sync with reality. So,
* restart the probing process as if we're probing a new hub
*/
queue_work(system_long_wq, &mgr->work);
mutex_unlock(&mgr->lock);
out_unlock: if (sync) {
DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
flush_work(&mgr->work);
}
return 0;
out_fail:
mutex_unlock(&mgr->lock); mutex_unlock(&mgr->lock);
return ret; return -1;
} }
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
...@@ -3256,70 +3690,133 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) ...@@ -3256,70 +3690,133 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
return 0; return 0;
} }
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) static inline bool
drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_pending_up_req *up_req)
{ {
struct drm_dp_sideband_msg_req_body msg;
struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
struct drm_dp_mst_branch *mstb = NULL; struct drm_dp_mst_branch *mstb = NULL;
const u8 *guid; struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
bool seqno; struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
bool hotplug = false;
if (!drm_dp_get_one_sb_msg(mgr, true)) if (hdr->broadcast) {
goto out; const u8 *guid = NULL;
if (!mgr->up_req_recv.have_eomt) if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
return 0; guid = msg->u.conn_stat.guid;
else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
guid = msg->u.resource_stat.guid;
if (!hdr->broadcast) { mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
} else {
mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
}
if (!mstb) { if (!mstb) {
DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
hdr->lct); hdr->lct);
goto out; return false;
} }
/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
hotplug = true;
} }
seqno = hdr->seqno; drm_dp_mst_topology_put_mstb(mstb);
drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); return hotplug;
}
if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) static void drm_dp_mst_up_req_work(struct work_struct *work)
guid = msg.u.conn_stat.guid; {
else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) struct drm_dp_mst_topology_mgr *mgr =
guid = msg.u.resource_stat.guid; container_of(work, struct drm_dp_mst_topology_mgr,
else up_req_work);
goto out; struct drm_dp_pending_up_req *up_req;
bool send_hotplug = false;
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, mutex_lock(&mgr->probe_lock);
false); while (true) {
mutex_lock(&mgr->up_req_lock);
up_req = list_first_entry_or_null(&mgr->up_req_list,
struct drm_dp_pending_up_req,
next);
if (up_req)
list_del(&up_req->next);
mutex_unlock(&mgr->up_req_lock);
if (!up_req)
break;
if (!mstb) { send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); kfree(up_req);
if (!mstb) { }
DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mutex_unlock(&mgr->probe_lock);
hdr->lct);
if (send_hotplug)
drm_kms_helper_hotplug_event(mgr->dev);
}
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
struct drm_dp_pending_up_req *up_req;
bool seqno;
if (!drm_dp_get_one_sb_msg(mgr, true))
goto out; goto out;
if (!mgr->up_req_recv.have_eomt)
return 0;
up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
if (!up_req) {
DRM_ERROR("Not enough memory to process MST up req\n");
return -ENOMEM;
} }
INIT_LIST_HEAD(&up_req->next);
seqno = hdr->seqno;
drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
up_req->msg.req_type);
kfree(up_req);
goto out;
} }
if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
drm_dp_mst_handle_conn_stat(mstb, &msg.u.conn_stat); seqno, false);
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
&up_req->msg.u.conn_stat;
DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
msg.u.conn_stat.port_number, conn_stat->port_number,
msg.u.conn_stat.legacy_device_plug_status, conn_stat->legacy_device_plug_status,
msg.u.conn_stat.displayport_device_plug_status, conn_stat->displayport_device_plug_status,
msg.u.conn_stat.message_capability_status, conn_stat->message_capability_status,
msg.u.conn_stat.input_port, conn_stat->input_port,
msg.u.conn_stat.peer_device_type); conn_stat->peer_device_type);
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
&up_req->msg.u.resource_stat;
drm_kms_helper_hotplug_event(mgr->dev);
} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
msg.u.resource_stat.port_number, res_stat->port_number,
msg.u.resource_stat.available_pbn); res_stat->available_pbn);
} }
drm_dp_mst_topology_put_mstb(mstb); up_req->hdr = *hdr;
mutex_lock(&mgr->up_req_lock);
list_add_tail(&up_req->next, &mgr->up_req_list);
mutex_unlock(&mgr->up_req_lock);
queue_work(system_long_wq, &mgr->up_req_work);
out: out:
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0; return 0;
...@@ -3366,22 +3863,31 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq); ...@@ -3366,22 +3863,31 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
/** /**
* drm_dp_mst_detect_port() - get connection status for an MST port * drm_dp_mst_detect_port() - get connection status for an MST port
* @connector: DRM connector for this port * @connector: DRM connector for this port
* @ctx: The acquisition context to use for grabbing locks
* @mgr: manager for this port * @mgr: manager for this port
* @port: unverified pointer to a port * @port: pointer to a port
* *
* This returns the current connection state for a port. It validates the * This returns the current connection state for a port.
* port pointer still exists so the caller doesn't require a reference
*/ */
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, int
struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) drm_dp_mst_detect_port(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{ {
enum drm_connector_status status = connector_status_disconnected; int ret;
/* we need to search for the port in the mgr in case it's gone */ /* we need to search for the port in the mgr in case it's gone */
port = drm_dp_mst_topology_get_port_validated(mgr, port); port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port) if (!port)
return connector_status_disconnected; return connector_status_disconnected;
ret = drm_modeset_lock(&mgr->base.lock, ctx);
if (ret)
goto out;
ret = connector_status_disconnected;
if (!port->ddps) if (!port->ddps)
goto out; goto out;
...@@ -3391,7 +3897,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector ...@@ -3391,7 +3897,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
break; break;
case DP_PEER_DEVICE_SST_SINK: case DP_PEER_DEVICE_SST_SINK:
status = connector_status_connected; ret = connector_status_connected;
/* for logical ports - cache the EDID */ /* for logical ports - cache the EDID */
if (port->port_num >= 8 && !port->cached_edid) { if (port->port_num >= 8 && !port->cached_edid) {
port->cached_edid = drm_get_edid(connector, &port->aux.ddc); port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
...@@ -3399,12 +3905,12 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector ...@@ -3399,12 +3905,12 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
break; break;
case DP_PEER_DEVICE_DP_LEGACY_CONV: case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps) if (port->ldps)
status = connector_status_connected; ret = connector_status_connected;
break; break;
} }
out: out:
drm_dp_mst_topology_put_port(port); drm_dp_mst_topology_put_port(port);
return status; return ret;
} }
EXPORT_SYMBOL(drm_dp_mst_detect_port); EXPORT_SYMBOL(drm_dp_mst_detect_port);
...@@ -3994,34 +4500,103 @@ static void drm_dp_tx_work(struct work_struct *work) ...@@ -3994,34 +4500,103 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock); mutex_unlock(&mgr->qlock);
} }
static void drm_dp_destroy_connector_work(struct work_struct *work) static inline void
drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{ {
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); if (port->connector)
struct drm_dp_mst_port *port; port->mgr->cbs->destroy_connector(port->mgr, port->connector);
bool send_hotplug = false;
drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
drm_dp_mst_put_port_malloc(port);
}
static inline void
drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port, *tmp;
bool wake_tx = false;
mutex_lock(&mgr->lock);
list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
list_del(&port->next);
drm_dp_mst_topology_put_port(port);
}
mutex_unlock(&mgr->lock);
/* drop any tx slots msg */
mutex_lock(&mstb->mgr->qlock);
if (mstb->tx_slots[0]) {
mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
mstb->tx_slots[0] = NULL;
wake_tx = true;
}
if (mstb->tx_slots[1]) {
mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
mstb->tx_slots[1] = NULL;
wake_tx = true;
}
mutex_unlock(&mstb->mgr->qlock);
if (wake_tx)
wake_up_all(&mstb->mgr->tx_waitq);
drm_dp_mst_put_mstb_malloc(mstb);
}
static void drm_dp_delayed_destroy_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr =
container_of(work, struct drm_dp_mst_topology_mgr,
delayed_destroy_work);
bool send_hotplug = false, go_again;
/* /*
* Not a regular list traverse as we have to drop the destroy * Not a regular list traverse as we have to drop the destroy
* connector lock before destroying the connector, to avoid AB->BA * connector lock before destroying the mstb/port, to avoid AB->BA
* ordering between this lock and the config mutex. * ordering between this lock and the config mutex.
*/ */
do {
go_again = false;
for (;;) { for (;;) {
mutex_lock(&mgr->destroy_connector_lock); struct drm_dp_mst_branch *mstb;
port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
if (!port) { mutex_lock(&mgr->delayed_destroy_lock);
mutex_unlock(&mgr->destroy_connector_lock); mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
struct drm_dp_mst_branch,
destroy_next);
if (mstb)
list_del(&mstb->destroy_next);
mutex_unlock(&mgr->delayed_destroy_lock);
if (!mstb)
break; break;
drm_dp_delayed_destroy_mstb(mstb);
go_again = true;
} }
list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
mgr->cbs->destroy_connector(mgr, port->connector); for (;;) {
struct drm_dp_mst_port *port;
mutex_lock(&mgr->delayed_destroy_lock);
port = list_first_entry_or_null(&mgr->destroy_port_list,
struct drm_dp_mst_port,
next);
if (port)
list_del(&port->next);
mutex_unlock(&mgr->delayed_destroy_lock);
drm_dp_port_teardown_pdt(port, port->pdt); if (!port)
port->pdt = DP_PEER_DEVICE_NONE; break;
drm_dp_mst_put_port_malloc(port); drm_dp_delayed_destroy_port(port);
send_hotplug = true; send_hotplug = true;
go_again = true;
} }
} while (go_again);
if (send_hotplug) if (send_hotplug)
drm_kms_helper_hotplug_event(mgr->dev); drm_kms_helper_hotplug_event(mgr->dev);
} }
...@@ -4208,12 +4783,20 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, ...@@ -4208,12 +4783,20 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->lock); mutex_init(&mgr->lock);
mutex_init(&mgr->qlock); mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock); mutex_init(&mgr->payload_lock);
mutex_init(&mgr->destroy_connector_lock); mutex_init(&mgr->delayed_destroy_lock);
mutex_init(&mgr->up_req_lock);
mutex_init(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
mutex_init(&mgr->topology_ref_history_lock);
#endif
INIT_LIST_HEAD(&mgr->tx_msg_downq); INIT_LIST_HEAD(&mgr->tx_msg_downq);
INIT_LIST_HEAD(&mgr->destroy_connector_list); INIT_LIST_HEAD(&mgr->destroy_port_list);
INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
INIT_LIST_HEAD(&mgr->up_req_list);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
INIT_WORK(&mgr->tx_work, drm_dp_tx_work); INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work); INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
init_waitqueue_head(&mgr->tx_waitq); init_waitqueue_head(&mgr->tx_waitq);
mgr->dev = dev; mgr->dev = dev;
mgr->aux = aux; mgr->aux = aux;
...@@ -4254,7 +4837,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) ...@@ -4254,7 +4837,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{ {
drm_dp_mst_topology_mgr_set_mst(mgr, false); drm_dp_mst_topology_mgr_set_mst(mgr, false);
flush_work(&mgr->work); flush_work(&mgr->work);
flush_work(&mgr->destroy_connector_work); cancel_work_sync(&mgr->delayed_destroy_work);
mutex_lock(&mgr->payload_lock); mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads); kfree(mgr->payloads);
mgr->payloads = NULL; mgr->payloads = NULL;
...@@ -4266,10 +4849,15 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) ...@@ -4266,10 +4849,15 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
drm_atomic_private_obj_fini(&mgr->base); drm_atomic_private_obj_fini(&mgr->base);
mgr->funcs = NULL; mgr->funcs = NULL;
mutex_destroy(&mgr->destroy_connector_lock); mutex_destroy(&mgr->delayed_destroy_lock);
mutex_destroy(&mgr->payload_lock); mutex_destroy(&mgr->payload_lock);
mutex_destroy(&mgr->qlock); mutex_destroy(&mgr->qlock);
mutex_destroy(&mgr->lock); mutex_destroy(&mgr->lock);
mutex_destroy(&mgr->up_req_lock);
mutex_destroy(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
mutex_destroy(&mgr->topology_ref_history_lock);
#endif
} }
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
......
...@@ -7625,7 +7625,8 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv) ...@@ -7625,7 +7625,8 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
if (!intel_dp->can_mst) if (!intel_dp->can_mst)
continue; continue;
ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr); ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
true);
if (ret) { if (ret) {
intel_dp->is_mst = false; intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
......
...@@ -391,20 +391,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) ...@@ -391,20 +391,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
return ret; return ret;
} }
static enum drm_connector_status
intel_dp_mst_detect(struct drm_connector *connector, bool force)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
intel_connector->port);
}
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes, .fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property, .atomic_set_property = intel_digital_connector_atomic_set_property,
...@@ -465,11 +452,26 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c ...@@ -465,11 +452,26 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
return &intel_dp->mst_encoders[crtc->pipe]->base.base; return &intel_dp->mst_encoders[crtc->pipe]->base.base;
} }
static int
intel_dp_mst_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
intel_connector->port);
}
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes, .get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid, .mode_valid = intel_dp_mst_mode_valid,
.atomic_best_encoder = intel_mst_atomic_best_encoder, .atomic_best_encoder = intel_mst_atomic_best_encoder,
.atomic_check = intel_dp_mst_atomic_check, .atomic_check = intel_dp_mst_atomic_check,
.detect_ctx = intel_dp_mst_detect,
}; };
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder) static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
......
...@@ -986,20 +986,11 @@ nv50_mstc_atomic_check(struct drm_connector *connector, ...@@ -986,20 +986,11 @@ nv50_mstc_atomic_check(struct drm_connector *connector,
return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port); return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
} }
static const struct drm_connector_helper_funcs static int
nv50_mstc_help = { nv50_mstc_detect(struct drm_connector *connector,
.get_modes = nv50_mstc_get_modes, struct drm_modeset_acquire_ctx *ctx, bool force)
.mode_valid = nv50_mstc_mode_valid,
.best_encoder = nv50_mstc_best_encoder,
.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
.atomic_check = nv50_mstc_atomic_check,
};
static enum drm_connector_status
nv50_mstc_detect(struct drm_connector *connector, bool force)
{ {
struct nv50_mstc *mstc = nv50_mstc(connector); struct nv50_mstc *mstc = nv50_mstc(connector);
enum drm_connector_status conn_status;
int ret; int ret;
if (drm_connector_is_unregistered(connector)) if (drm_connector_is_unregistered(connector))
...@@ -1009,14 +1000,24 @@ nv50_mstc_detect(struct drm_connector *connector, bool force) ...@@ -1009,14 +1000,24 @@ nv50_mstc_detect(struct drm_connector *connector, bool force)
if (ret < 0 && ret != -EACCES) if (ret < 0 && ret != -EACCES)
return connector_status_disconnected; return connector_status_disconnected;
conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr, ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
mstc->port); mstc->port);
pm_runtime_mark_last_busy(connector->dev->dev); pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev); pm_runtime_put_autosuspend(connector->dev->dev);
return conn_status; return ret;
} }
static const struct drm_connector_helper_funcs
nv50_mstc_help = {
.get_modes = nv50_mstc_get_modes,
.mode_valid = nv50_mstc_mode_valid,
.best_encoder = nv50_mstc_best_encoder,
.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
.atomic_check = nv50_mstc_atomic_check,
.detect_ctx = nv50_mstc_detect,
};
static void static void
nv50_mstc_destroy(struct drm_connector *connector) nv50_mstc_destroy(struct drm_connector *connector)
{ {
...@@ -1031,7 +1032,6 @@ nv50_mstc_destroy(struct drm_connector *connector) ...@@ -1031,7 +1032,6 @@ nv50_mstc_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs static const struct drm_connector_funcs
nv50_mstc = { nv50_mstc = {
.reset = nouveau_conn_reset, .reset = nouveau_conn_reset,
.detect = nv50_mstc_detect,
.fill_modes = drm_helper_probe_single_connector_modes, .fill_modes = drm_helper_probe_single_connector_modes,
.destroy = nv50_mstc_destroy, .destroy = nv50_mstc_destroy,
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state, .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
...@@ -1309,14 +1309,14 @@ nv50_mstm_fini(struct nv50_mstm *mstm) ...@@ -1309,14 +1309,14 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
} }
static void static void
nv50_mstm_init(struct nv50_mstm *mstm) nv50_mstm_init(struct nv50_mstm *mstm, bool runtime)
{ {
int ret; int ret;
if (!mstm || !mstm->mgr.mst_state) if (!mstm || !mstm->mgr.mst_state)
return; return;
ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr); ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
if (ret == -1) { if (ret == -1) {
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
drm_kms_helper_hotplug_event(mstm->mgr.dev); drm_kms_helper_hotplug_event(mstm->mgr.dev);
...@@ -2263,7 +2263,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime) ...@@ -2263,7 +2263,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) { if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
struct nouveau_encoder *nv_encoder = struct nouveau_encoder *nv_encoder =
nouveau_encoder(encoder); nouveau_encoder(encoder);
nv50_mstm_init(nv_encoder->dp.mstm); nv50_mstm_init(nv_encoder->dp.mstm, runtime);
} }
} }
......
...@@ -1130,6 +1130,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify) ...@@ -1130,6 +1130,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
const char *name = connector->name; const char *name = connector->name;
struct nouveau_encoder *nv_encoder; struct nouveau_encoder *nv_encoder;
int ret; int ret;
bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
NV_DEBUG(drm, "service %s\n", name);
drm_dp_cec_irq(&nv_connector->aux);
if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
nv50_mstm_service(nv_encoder->dp.mstm);
return NVIF_NOTIFY_KEEP;
}
ret = pm_runtime_get(drm->dev->dev); ret = pm_runtime_get(drm->dev->dev);
if (ret == 0) { if (ret == 0) {
...@@ -1150,14 +1160,6 @@ nouveau_connector_hotplug(struct nvif_notify *notify) ...@@ -1150,14 +1160,6 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
return NVIF_NOTIFY_DROP; return NVIF_NOTIFY_DROP;
} }
if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
NV_DEBUG(drm, "service %s\n", name);
drm_dp_cec_irq(&nv_connector->aux);
if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
nv50_mstm_service(nv_encoder->dp.mstm);
} else {
bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
if (!plugged) if (!plugged)
drm_dp_cec_unset_edid(&nv_connector->aux); drm_dp_cec_unset_edid(&nv_connector->aux);
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
...@@ -1167,7 +1169,6 @@ nouveau_connector_hotplug(struct nvif_notify *notify) ...@@ -1167,7 +1169,6 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
} }
drm_helper_hpd_irq_event(connector->dev); drm_helper_hpd_irq_event(connector->dev);
}
pm_runtime_mark_last_busy(drm->dev->dev); pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_autosuspend(drm->dev->dev); pm_runtime_put_autosuspend(drm->dev->dev);
......
...@@ -407,6 +407,17 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime) ...@@ -407,6 +407,17 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
struct drm_connector_list_iter conn_iter; struct drm_connector_list_iter conn_iter;
int ret; int ret;
/*
* Enable hotplug interrupts (done as early as possible, since we need
* them for MST)
*/
drm_connector_list_iter_begin(dev, &conn_iter);
nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *conn = nouveau_connector(connector);
nvif_notify_get(&conn->hpd);
}
drm_connector_list_iter_end(&conn_iter);
ret = disp->init(dev, resume, runtime); ret = disp->init(dev, resume, runtime);
if (ret) if (ret)
return ret; return ret;
...@@ -416,14 +427,6 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime) ...@@ -416,14 +427,6 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
*/ */
drm_kms_helper_poll_enable(dev); drm_kms_helper_poll_enable(dev);
/* enable hotplug interrupts */
drm_connector_list_iter_begin(dev, &conn_iter);
nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *conn = nouveau_connector(connector);
nvif_notify_get(&conn->hpd);
}
drm_connector_list_iter_end(&conn_iter);
return ret; return ret;
} }
......
...@@ -233,21 +233,26 @@ drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector) ...@@ -233,21 +233,26 @@ drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
return &radeon_connector->mst_encoder->base; return &radeon_connector->mst_encoder->base;
} }
static int
radeon_dp_mst_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
struct radeon_connector *radeon_connector =
to_radeon_connector(connector);
struct radeon_connector *master = radeon_connector->mst_port;
return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
radeon_connector->port);
}
static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = { static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = {
.get_modes = radeon_dp_mst_get_modes, .get_modes = radeon_dp_mst_get_modes,
.mode_valid = radeon_dp_mst_mode_valid, .mode_valid = radeon_dp_mst_mode_valid,
.best_encoder = radeon_mst_best_encoder, .best_encoder = radeon_mst_best_encoder,
.detect_ctx = radeon_dp_mst_detect,
}; };
static enum drm_connector_status
radeon_dp_mst_detect(struct drm_connector *connector, bool force)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector *master = radeon_connector->mst_port;
return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port);
}
static void static void
radeon_dp_mst_connector_destroy(struct drm_connector *connector) radeon_dp_mst_connector_destroy(struct drm_connector *connector)
{ {
...@@ -262,7 +267,6 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector) ...@@ -262,7 +267,6 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = { static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
.dpms = drm_helper_connector_dpms, .dpms = drm_helper_connector_dpms,
.detect = radeon_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes, .fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_dp_mst_connector_destroy, .destroy = radeon_dp_mst_connector_destroy,
}; };
......
...@@ -26,6 +26,26 @@ ...@@ -26,6 +26,26 @@
#include <drm/drm_dp_helper.h> #include <drm/drm_dp_helper.h>
#include <drm/drm_atomic.h> #include <drm/drm_atomic.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
#include <linux/timekeeping.h>
enum drm_dp_mst_topology_ref_type {
DRM_DP_MST_TOPOLOGY_REF_GET,
DRM_DP_MST_TOPOLOGY_REF_PUT,
};
struct drm_dp_mst_topology_ref_history {
struct drm_dp_mst_topology_ref_entry {
enum drm_dp_mst_topology_ref_type type;
int count;
ktime_t ts_nsec;
depot_stack_handle_t backtrace;
} *entries;
int len;
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
struct drm_dp_mst_branch; struct drm_dp_mst_branch;
/** /**
...@@ -45,21 +65,31 @@ struct drm_dp_vcpi { ...@@ -45,21 +65,31 @@ struct drm_dp_vcpi {
/** /**
* struct drm_dp_mst_port - MST port * struct drm_dp_mst_port - MST port
* @port_num: port number * @port_num: port number
* @input: if this port is an input port. * @input: if this port is an input port. Protected by
* @mcs: message capability status - DP 1.2 spec. * &drm_dp_mst_topology_mgr.base.lock.
* @ddps: DisplayPort Device Plug Status - DP 1.2 * @mcs: message capability status - DP 1.2 spec. Protected by
* @pdt: Peer Device Type * &drm_dp_mst_topology_mgr.base.lock.
* @ldps: Legacy Device Plug Status * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
* @dpcd_rev: DPCD revision of device on this port * &drm_dp_mst_topology_mgr.base.lock.
* @num_sdp_streams: Number of simultaneous streams * @pdt: Peer Device Type. Protected by
* @num_sdp_stream_sinks: Number of stream sinks * &drm_dp_mst_topology_mgr.base.lock.
* @available_pbn: Available bandwidth for this port. * @ldps: Legacy Device Plug Status. Protected by
* &drm_dp_mst_topology_mgr.base.lock.
* @dpcd_rev: DPCD revision of device on this port. Protected by
* &drm_dp_mst_topology_mgr.base.lock.
* @num_sdp_streams: Number of simultaneous streams. Protected by
* &drm_dp_mst_topology_mgr.base.lock.
* @num_sdp_stream_sinks: Number of stream sinks. Protected by
* &drm_dp_mst_topology_mgr.base.lock.
* @available_pbn: Available bandwidth for this port. Protected by
* &drm_dp_mst_topology_mgr.base.lock.
* @next: link to next port on this branch device * @next: link to next port on this branch device
* @mstb: branch device attach below this port * @aux: i2c aux transport to talk to device connected to this port, protected
* @aux: i2c aux transport to talk to device connected to this port. * by &drm_dp_mst_topology_mgr.base.lock.
* @parent: branch device parent of this port * @parent: branch device parent of this port
* @vcpi: Virtual Channel Payload info for this port. * @vcpi: Virtual Channel Payload info for this port.
* @connector: DRM connector this port is connected to. * @connector: DRM connector this port is connected to. Protected by
* &drm_dp_mst_topology_mgr.base.lock.
* @mgr: topology manager this port lives under. * @mgr: topology manager this port lives under.
* *
* This structure represents an MST port endpoint on a device somewhere * This structure represents an MST port endpoint on a device somewhere
...@@ -79,6 +109,14 @@ struct drm_dp_mst_port { ...@@ -79,6 +109,14 @@ struct drm_dp_mst_port {
*/ */
struct kref malloc_kref; struct kref malloc_kref;
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
/**
* @topology_ref_history: A history of each topology
* reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
*/
struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif
u8 port_num; u8 port_num;
bool input; bool input;
bool mcs; bool mcs;
...@@ -90,7 +128,17 @@ struct drm_dp_mst_port { ...@@ -90,7 +128,17 @@ struct drm_dp_mst_port {
u8 num_sdp_stream_sinks; u8 num_sdp_stream_sinks;
uint16_t available_pbn; uint16_t available_pbn;
struct list_head next; struct list_head next;
struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */ /**
* @mstb: the branch device connected to this port, if there is one.
* This should be considered protected for reading by
* &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
* &drm_dp_mst_topology_mgr.up_req_work and
* &drm_dp_mst_topology_mgr.work, which do not grab
* &drm_dp_mst_topology_mgr.lock during reads but are the only
* updaters of this list and are protected from writing concurrently
* by &drm_dp_mst_topology_mgr.probe_lock.
*/
struct drm_dp_mst_branch *mstb;
struct drm_dp_aux aux; /* i2c bus for this port? */ struct drm_dp_aux aux; /* i2c bus for this port? */
struct drm_dp_mst_branch *parent; struct drm_dp_mst_branch *parent;
...@@ -116,7 +164,6 @@ struct drm_dp_mst_port { ...@@ -116,7 +164,6 @@ struct drm_dp_mst_port {
* @lct: Link count total to talk to this branch device. * @lct: Link count total to talk to this branch device.
* @num_ports: number of ports on the branch. * @num_ports: number of ports on the branch.
* @msg_slots: one bit per transmitted msg slot. * @msg_slots: one bit per transmitted msg slot.
* @ports: linked list of ports on this branch.
* @port_parent: pointer to the port parent, NULL if toplevel. * @port_parent: pointer to the port parent, NULL if toplevel.
* @mgr: topology manager for this branch device. * @mgr: topology manager for this branch device.
* @tx_slots: transmission slots for this device. * @tx_slots: transmission slots for this device.
...@@ -143,11 +190,35 @@ struct drm_dp_mst_branch { ...@@ -143,11 +190,35 @@ struct drm_dp_mst_branch {
*/ */
struct kref malloc_kref; struct kref malloc_kref;
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
/**
* @topology_ref_history: A history of each topology
* reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
*/
struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif
/**
* @destroy_next: linked-list entry used by
* drm_dp_delayed_destroy_work()
*/
struct list_head destroy_next;
u8 rad[8]; u8 rad[8];
u8 lct; u8 lct;
int num_ports; int num_ports;
int msg_slots; int msg_slots;
/**
* @ports: the list of ports on this branch device. This should be
* considered protected for reading by &drm_dp_mst_topology_mgr.lock.
* There are two exceptions to this:
* &drm_dp_mst_topology_mgr.up_req_work and
* &drm_dp_mst_topology_mgr.work, which do not grab
* &drm_dp_mst_topology_mgr.lock during reads but are the only
* updaters of this list and are protected from updating the list
* concurrently by @drm_dp_mst_topology_mgr.probe_lock
*/
struct list_head ports; struct list_head ports;
/* list of tx ops queue for this port */ /* list of tx ops queue for this port */
...@@ -494,6 +565,13 @@ struct drm_dp_mst_topology_mgr { ...@@ -494,6 +565,13 @@ struct drm_dp_mst_topology_mgr {
*/ */
struct mutex lock; struct mutex lock;
/**
* @probe_lock: Prevents @work and @up_req_work, the only writers of
* &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
* while they update the topology.
*/
struct mutex probe_lock;
/** /**
* @mst_state: If this manager is enabled for an MST capable port. False * @mst_state: If this manager is enabled for an MST capable port. False
* if no MST sink/branch devices is connected. * if no MST sink/branch devices is connected.
...@@ -571,18 +649,49 @@ struct drm_dp_mst_topology_mgr { ...@@ -571,18 +649,49 @@ struct drm_dp_mst_topology_mgr {
struct work_struct tx_work; struct work_struct tx_work;
/** /**
* @destroy_connector_list: List of to be destroyed connectors. * @destroy_port_list: List of to be destroyed connectors.
*/
struct list_head destroy_port_list;
/**
* @destroy_branch_device_list: List of to be destroyed branch
* devices.
*/ */
struct list_head destroy_connector_list; struct list_head destroy_branch_device_list;
/** /**
* @destroy_connector_lock: Protects @connector_list. * @delayed_destroy_lock: Protects @destroy_port_list and
* @destroy_branch_device_list.
*/ */
struct mutex destroy_connector_lock; struct mutex delayed_destroy_lock;
/** /**
* @destroy_connector_work: Work item to destroy connectors. Needed to * @delayed_destroy_work: Work item to destroy MST port and branch
* avoid locking inversion. * devices, needed to avoid locking inversion.
*/ */
struct work_struct destroy_connector_work; struct work_struct delayed_destroy_work;
/**
* @up_req_list: List of pending up requests from the topology that
* need to be processed, in chronological order.
*/
struct list_head up_req_list;
/**
* @up_req_lock: Protects @up_req_list
*/
struct mutex up_req_lock;
/**
* @up_req_work: Work item to process up requests received from the
* topology. Needed to avoid blocking hotplug handling and sideband
* transmissions.
*/
struct work_struct up_req_work;
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
/**
* @topology_ref_history_lock: protects
* &drm_dp_mst_port.topology_ref_history and
* &drm_dp_mst_branch.topology_ref_history.
*/
struct mutex topology_ref_history_lock;
#endif
}; };
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
...@@ -599,7 +708,11 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms ...@@ -599,7 +708,11 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); int
drm_dp_mst_detect_port(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port); struct drm_dp_mst_port *port);
...@@ -638,7 +751,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m, ...@@ -638,7 +751,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
bool sync);
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
unsigned int offset, void *buffer, size_t size); unsigned int offset, void *buffer, size_t size);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment