Commit 4c0449c9 authored by Linus Torvalds

Merge tag 'drm-fixes-2020-09-18' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "A bunch of small fixes; some of the i915 ones have been out for a
  while and got better commit messages explaining the reasoning behind
  them (hopefully this trend continues).

  Otherwise there are a few mostly small AMD-related ones, one radeon
  PLL regression fix, and a bunch of small mediatek fixes.

  amdgpu:
   - Sienna Cichlid fixes
   - Navy Flounder fixes
   - DC fixes

  amdkfd:
   - Fix a GPU reset crash
   - Fix a memory leak

  radeon:
   - Revert a PLL fix that broke other boards

  i915:
   - Avoid exposing a partially constructed context
   - Use RCU instead of mutex for context termination list iteration
   - Avoid data race reported by KCSAN
   - Filter wake_flags passed to default_wake_function

  mediatek:
   - Fix scrolling of panel
   - Remove duplicated include
   - Use CPU when fail to get cmdq event
   - Add missing put_device() call"

* tag 'drm-fixes-2020-09-18' of git://anongit.freedesktop.org/drm/drm: (21 commits)
  drm/amd/display: Don't log hdcp module warnings in dmesg
  drm/amdgpu: declare ta firmware for navy_flounder
  drm/mediatek: Add missing put_device() call in mtk_hdmi_dt_parse_pdata()
  drm/mediatek: Add missing put_device() call in mtk_drm_kms_init()
  drm/mediatek: Add exception handing in mtk_drm_probe() if component init fail
  drm/mediatek: Add missing put_device() call in mtk_ddp_comp_init()
  drm/mediatek: Use CPU when fail to get cmdq event
  drm/mediatek: Remove duplicated include
  drm/i915: Filter wake_flags passed to default_wake_function
  drm/i915: Be wary of data races when reading the active execlists
  drm/i915/gem: Reduce context termination list iteration guard to RCU
  drm/i915/gem: Delay tracking the GEM context until it is registered
  drm/amdgpu/dc: Require primary plane to be enabled whenever the CRTC is
  drm/radeon: revert "Prefer lower feedback dividers"
  drm/amdgpu: Include sienna_cichlid in USBC PD FW support.
  drm/amd/display: update nv1x stutter latencies
  drm/amd/display: Don't use DRM_ERROR() for DTM add topology
  drm/amd/pm: support runtime pptable update for sienna_cichlid etc.
  drm/amdkfd: fix a memory leak issue
  drm/kfd: fix a system crash issue during GPU recovery
  ...
parents 4cbffc46 1f08fde7
@@ -178,7 +178,7 @@ static int psp_sw_init(void *handle)
 		return ret;
 	}
 
-	if (adev->asic_type == CHIP_NAVI10) {
+	if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
 		ret= psp_sysfs_init(adev);
 		if (ret) {
 			return ret;
...
@@ -58,7 +58,7 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
 MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
 MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
 MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
-MODULE_FIRMWARE("amdgpu/navy_flounder_asd.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_ta.bin");
 
 /* address block */
 #define smnMP1_FIRMWARE_FLAGS	0x3010024
...
@@ -1216,6 +1216,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
 	dqm->sched_running = false;
 	dqm_unlock(dqm);
 
+	pm_release_ib(&dqm->packets);
+
 	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
 	pm_uninit(&dqm->packets, hanging);
@@ -1326,7 +1328,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	if (q->properties.is_active) {
 		increment_queue_count(dqm, q->properties.type);
 
-		retval = execute_queues_cpsch(dqm,
+		execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	}
...
@@ -5278,19 +5278,6 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
 {
 }
 
-static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
-{
-	struct drm_device *dev = new_crtc_state->crtc->dev;
-	struct drm_plane *plane;
-
-	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
-		if (plane->type == DRM_PLANE_TYPE_CURSOR)
-			return true;
-	}
-
-	return false;
-}
-
 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
 {
 	struct drm_atomic_state *state = new_crtc_state->state;
@@ -5354,19 +5341,20 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
 		return ret;
 	}
 
-	/* In some use cases, like reset, no stream is attached */
-	if (!dm_crtc_state->stream)
-		return 0;
-
 	/*
-	 * We want at least one hardware plane enabled to use
-	 * the stream with a cursor enabled.
+	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
+	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
+	 * planes are disabled, which is not supported by the hardware. And there is legacy
+	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
 	 */
-	if (state->enable && state->active &&
-	    does_crtc_have_active_cursor(state) &&
-	    dm_crtc_state->active_planes == 0)
+	if (state->enable &&
+	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
 		return -EINVAL;
 
+	/* In some use cases, like reset, no stream is attached */
+	if (!dm_crtc_state->stream)
+		return 0;
+
 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
 		return 0;
...
@@ -409,8 +409,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
 		},
 	},
 	.num_states = 5,
-	.sr_exit_time_us = 8.6,
-	.sr_enter_plus_exit_time_us = 10.9,
+	.sr_exit_time_us = 11.6,
+	.sr_enter_plus_exit_time_us = 13.9,
 	.urgent_latency_us = 4.0,
 	.urgent_latency_pixel_data_only_us = 4.0,
 	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
...
@@ -27,7 +27,7 @@
 #define MOD_HDCP_LOG_H_
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
-#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__)
+#define HDCP_LOG_ERR(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
...
@@ -88,7 +88,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
 	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
 	if (!psp->dtm_context.dtm_initialized) {
-		DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+		DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
 		display->state = MOD_HDCP_DISPLAY_INACTIVE;
 		return MOD_HDCP_STATUS_FAILURE;
 	}
...
@@ -1126,7 +1126,7 @@ static int smu_disable_dpms(struct smu_context *smu)
 	 */
 	if (smu->uploading_custom_pp_table &&
 	    (adev->asic_type >= CHIP_NAVI10) &&
-	    (adev->asic_type <= CHIP_NAVI12))
+	    (adev->asic_type <= CHIP_NAVY_FLOUNDER))
 		return 0;
 
 	/*
@@ -1211,7 +1211,9 @@ static int smu_hw_fini(void *handle)
 int smu_reset(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
-	int ret = 0;
+	int ret;
+
+	amdgpu_gfx_off_ctrl(smu->adev, false);
 
 	ret = smu_hw_fini(adev);
 	if (ret)
@@ -1222,8 +1224,12 @@ int smu_reset(struct smu_context *smu)
 		return ret;
 
 	ret = smu_late_init(adev);
+	if (ret)
+		return ret;
+
+	amdgpu_gfx_off_ctrl(smu->adev, true);
 
-	return ret;
+	return 0;
 }
 
 static int smu_suspend(void *handle)
...
@@ -439,29 +439,36 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
 	return __reset_engine(engine);
 }
 
-static struct intel_engine_cs *__active_engine(struct i915_request *rq)
+static bool
+__active_engine(struct i915_request *rq, struct intel_engine_cs **active)
 {
 	struct intel_engine_cs *engine, *locked;
+	bool ret = false;
 
 	/*
 	 * Serialise with __i915_request_submit() so that it sees
 	 * is-banned?, or we know the request is already inflight.
+	 *
+	 * Note that rq->engine is unstable, and so we double
+	 * check that we have acquired the lock on the final engine.
 	 */
 	locked = READ_ONCE(rq->engine);
 	spin_lock_irq(&locked->active.lock);
 	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
 		spin_unlock(&locked->active.lock);
-		spin_lock(&engine->active.lock);
 		locked = engine;
+		spin_lock(&locked->active.lock);
 	}
 
-	engine = NULL;
-	if (i915_request_is_active(rq) && rq->fence.error != -EIO)
-		engine = rq->engine;
+	if (!i915_request_completed(rq)) {
+		if (i915_request_is_active(rq) && rq->fence.error != -EIO)
+			*active = locked;
+		ret = true;
+	}
 
 	spin_unlock_irq(&locked->active.lock);
 
-	return engine;
+	return ret;
 }
 
 static struct intel_engine_cs *active_engine(struct intel_context *ce)
@@ -472,17 +479,16 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
 	if (!ce->timeline)
 		return NULL;
 
-	mutex_lock(&ce->timeline->mutex);
-	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
-		if (i915_request_completed(rq))
-			break;
+	rcu_read_lock();
+	list_for_each_entry_rcu(rq, &ce->timeline->requests, link) {
+		if (i915_request_is_active(rq) && i915_request_completed(rq))
+			continue;
 
 		/* Check with the backend if the request is inflight */
-		engine = __active_engine(rq);
-		if (engine)
+		if (__active_engine(rq, &engine))
 			break;
 	}
-	mutex_unlock(&ce->timeline->mutex);
+	rcu_read_unlock();
 
 	return engine;
 }
@@ -713,6 +719,7 @@ __create_context(struct drm_i915_private *i915)
 	ctx->i915 = i915;
 	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
 	mutex_init(&ctx->mutex);
+	INIT_LIST_HEAD(&ctx->link);
 
 	spin_lock_init(&ctx->stale.lock);
 	INIT_LIST_HEAD(&ctx->stale.engines);
@@ -740,10 +747,6 @@ __create_context(struct drm_i915_private *i915)
 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
-	spin_lock(&i915->gem.contexts.lock);
-	list_add_tail(&ctx->link, &i915->gem.contexts.list);
-	spin_unlock(&i915->gem.contexts.lock);
-
 	return ctx;
 
 err_free:
@@ -931,6 +934,7 @@ static int gem_context_register(struct i915_gem_context *ctx,
 				struct drm_i915_file_private *fpriv,
 				u32 *id)
 {
+	struct drm_i915_private *i915 = ctx->i915;
 	struct i915_address_space *vm;
 	int ret;
 
@@ -949,8 +953,16 @@ static int gem_context_register(struct i915_gem_context *ctx,
 	/* And finally expose ourselves to userspace via the idr */
 	ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
 	if (ret)
-		put_pid(fetch_and_zero(&ctx->pid));
+		goto err_pid;
+
+	spin_lock(&i915->gem.contexts.lock);
+	list_add_tail(&ctx->link, &i915->gem.contexts.list);
+	spin_unlock(&i915->gem.contexts.lock);
+
+	return 0;
 
+err_pid:
+	put_pid(fetch_and_zero(&ctx->pid));
 	return ret;
 }
...
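The i915 GEM context hunks above combine into one pattern: a context is added to the globally visible i915->gem.contexts.list only after it is fully constructed and registered, and readers walk RCU-protected lists without taking the timeline mutex. A minimal sketch of that publish-after-init idiom, with hypothetical names in place of the driver's real structures:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the GEM context; not the driver's struct. */
struct ctx {
	struct list_head link;	/* writers serialise on ctx_lock */
	int id;			/* stands in for the remaining state */
};

static LIST_HEAD(ctx_list);
static DEFINE_SPINLOCK(ctx_lock);

static struct ctx *ctx_create_and_register(int id)
{
	struct ctx *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	INIT_LIST_HEAD(&c->link);	/* keeps list_del() safe if never published */
	c->id = id;			/* finish construction first... */

	spin_lock(&ctx_lock);		/* ...and only then publish */
	list_add_tail_rcu(&c->link, &ctx_list);
	spin_unlock(&ctx_lock);

	return c;
}

static struct ctx *ctx_lookup(int id)
{
	struct ctx *c, *found = NULL;

	rcu_read_lock();		/* readers take no mutex at all */
	list_for_each_entry_rcu(c, &ctx_list, link) {
		if (c->id == id) {
			found = c;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}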
@@ -2060,6 +2060,14 @@ static inline void clear_ports(struct i915_request **ports, int count)
 	memset_p((void **)ports, NULL, count);
 }
 
+static inline void
+copy_ports(struct i915_request **dst, struct i915_request **src, int count)
+{
+	/* A memcpy_p() would be very useful here! */
+	while (count--)
+		WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+}
+
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -2648,10 +2656,9 @@ static void process_csb(struct intel_engine_cs *engine)
 		/* switch pending to inflight */
 		GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
-		memcpy(execlists->inflight,
-		       execlists->pending,
-		       execlists_num_ports(execlists) *
-		       sizeof(*execlists->pending));
+		copy_ports(execlists->inflight,
+			   execlists->pending,
+			   execlists_num_ports(execlists));
 		smp_wmb(); /* complete the seqlock */
 		WRITE_ONCE(execlists->active, execlists->inflight);
...
@@ -388,17 +388,38 @@ static bool __request_in_flight(const struct i915_request *signal)
 	 * As we know that there are always preemption points between
 	 * requests, we know that only the currently executing request
 	 * may be still active even though we have cleared the flag.
-	 * However, we can't rely on our tracking of ELSP[0] to known
+	 * However, we can't rely on our tracking of ELSP[0] to know
 	 * which request is currently active and so maybe stuck, as
 	 * the tracking maybe an event behind. Instead assume that
 	 * if the context is still inflight, then it is still active
 	 * even if the active flag has been cleared.
+	 *
+	 * To further complicate matters, if there a pending promotion, the HW
+	 * may either perform a context switch to the second inflight execlists,
+	 * or it may switch to the pending set of execlists. In the case of the
+	 * latter, it may send the ACK and we process the event copying the
+	 * pending[] over top of inflight[], _overwriting_ our *active. Since
+	 * this implies the HW is arbitrating and not struck in *active, we do
+	 * not worry about complete accuracy, but we do require no read/write
+	 * tearing of the pointer [the read of the pointer must be valid, even
+	 * as the array is being overwritten, for which we require the writes
+	 * to avoid tearing.]
+	 *
+	 * Note that the read of *execlists->active may race with the promotion
+	 * of execlists->pending[] to execlists->inflight[], overwritting
+	 * the value at *execlists->active. This is fine. The promotion implies
+	 * that we received an ACK from the HW, and so the context is not
+	 * stuck -- if we do not see ourselves in *active, the inflight status
+	 * is valid. If instead we see ourselves being copied into *active,
+	 * we are inflight and may signal the callback.
 	 */
 	if (!intel_context_inflight(signal->context))
 		return false;
 
 	rcu_read_lock();
-	for (port = __engine_active(signal->engine); (rq = *port); port++) {
+	for (port = __engine_active(signal->engine);
+	     (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
+	     port++) {
 		if (rq->context == signal->context) {
 			inflight = i915_seqno_passed(rq->fence.seqno,
 						     signal->fence.seqno);
...
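Taken together, the two i915 execlists hunks above implement a small lockless contract: the writer promoting pending[] into inflight[] must not tear its pointer-sized stores, and readers must tolerate the array being overwritten mid-walk. A condensed sketch of the WRITE_ONCE()/READ_ONCE() pairing, using hypothetical names rather than the real execlists structures:

#include <linux/compiler.h>
#include <linux/types.h>

#define NUM_PORTS 2

struct request;

static struct request *inflight[NUM_PORTS + 1];	/* NULL-terminated */
static struct request *pending[NUM_PORTS + 1];

static void promote_pending(void)
{
	struct request **dst = inflight, **src = pending;
	int count = NUM_PORTS + 1;

	/*
	 * memcpy() may tear the pointer-sized stores; WRITE_ONCE() makes
	 * each store single-copy atomic, so a concurrent reader observes
	 * either the old pointer or the new one, never a mix of both.
	 */
	while (count--)
		WRITE_ONCE(*dst++, *src++);
}

static bool request_is_inflight(const struct request *rq)
{
	struct request **port, *cur;

	/* Matching READ_ONCE(): the array may be overwritten under us. */
	for (port = inflight; (cur = READ_ONCE(*port)); port++)
		if (cur == rq)
			return true;

	return false;
}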
@@ -164,9 +164,13 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
 	do {
 		list_for_each_entry_safe(pos, next, &x->head, entry) {
-			pos->func(pos,
-				  TASK_NORMAL, fence->error,
-				  &extra);
+			int wake_flags;
+
+			wake_flags = fence->error;
+			if (pos->func == autoremove_wake_function)
+				wake_flags = 0;
+
+			pos->func(pos, TASK_NORMAL, wake_flags, &extra);
 		}
 
 		if (list_empty(&extra))
...
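The filter above exists because the wait-entry callback signature, int (*func)(struct wait_queue_entry *, unsigned mode, int flags, void *key), is overloaded: i915_sw_fence passes the fence error in the flags slot for its own continuation callbacks, while a generic entry installed via autoremove_wake_function() hands that same value on to default_wake_function() and try_to_wake_up() as wake_flags, where a stray -EIO is nonsense. A sketch of the dispatch-time filter in isolation (hypothetical wait queue; the generic helpers are the real ones):

#include <linux/sched.h>
#include <linux/wait.h>

static void wake_all_filtered(struct wait_queue_head *wq, int fence_error)
{
	struct wait_queue_entry *pos, *next;

	list_for_each_entry_safe(pos, next, &wq->head, entry) {
		/* i915's convention: the flags slot carries the error. */
		int wake_flags = fence_error;

		/*
		 * Generic entries interpret the argument as wake_flags
		 * (WF_SYNC and friends), so pass 0 to those instead.
		 */
		if (pos->func == autoremove_wake_function)
			wake_flags = 0;

		pos->func(pos, TASK_NORMAL, wake_flags, NULL);
	}
}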
@@ -831,13 +831,19 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 			drm_crtc_index(&mtk_crtc->base));
 		mtk_crtc->cmdq_client = NULL;
 	}
-	ret = of_property_read_u32_index(priv->mutex_node,
-					 "mediatek,gce-events",
-					 drm_crtc_index(&mtk_crtc->base),
-					 &mtk_crtc->cmdq_event);
-	if (ret)
-		dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
-			drm_crtc_index(&mtk_crtc->base));
+
+	if (mtk_crtc->cmdq_client) {
+		ret = of_property_read_u32_index(priv->mutex_node,
+						 "mediatek,gce-events",
+						 drm_crtc_index(&mtk_crtc->base),
+						 &mtk_crtc->cmdq_event);
+		if (ret) {
+			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
+				drm_crtc_index(&mtk_crtc->base));
+			cmdq_mbox_destroy(mtk_crtc->cmdq_client);
+			mtk_crtc->cmdq_client = NULL;
+		}
+	}
 #endif
 	return 0;
 }
...
@@ -496,6 +496,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 	if (of_address_to_resource(node, 0, &res) != 0) {
 		dev_err(dev, "Missing reg in %s node\n", node->full_name);
+		put_device(&larb_pdev->dev);
 		return -EINVAL;
 	}
 	comp->regs_pa = res.start;
...
@@ -27,7 +27,6 @@
 
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp.h"
-#include "mtk_drm_ddp.h"
 #include "mtk_drm_ddp_comp.h"
 #include "mtk_drm_drv.h"
 #include "mtk_drm_gem.h"
@@ -165,7 +164,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	ret = drmm_mode_config_init(drm);
 	if (ret)
-		return ret;
+		goto put_mutex_dev;
 
 	drm->mode_config.min_width = 64;
 	drm->mode_config.min_height = 64;
@@ -182,7 +181,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	ret = component_bind_all(drm->dev, drm);
 	if (ret)
-		return ret;
+		goto put_mutex_dev;
 
 	/*
 	 * We currently support two fixed data streams, each optional,
@@ -229,7 +228,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	}
 	if (!dma_dev->dma_parms) {
 		ret = -ENOMEM;
-		goto err_component_unbind;
+		goto put_dma_dev;
 	}
 
 	ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
@@ -256,9 +255,12 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 err_unset_dma_parms:
 	if (private->dma_parms_allocated)
 		dma_dev->dma_parms = NULL;
+put_dma_dev:
+	put_device(private->dma_dev);
 err_component_unbind:
 	component_unbind_all(drm->dev, drm);
+put_mutex_dev:
+	put_device(private->mutex_dev);
 
 	return ret;
 }
@@ -544,8 +546,13 @@ static int mtk_drm_probe(struct platform_device *pdev)
 	pm_runtime_disable(dev);
 err_node:
 	of_node_put(private->mutex_node);
-	for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
+	for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) {
 		of_node_put(private->comp_node[i]);
+		if (private->ddp_comp[i]) {
+			put_device(private->ddp_comp[i]->larb_dev);
+			private->ddp_comp[i] = NULL;
+		}
+	}
 	return ret;
 }
...
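All of the MediaTek fixes in this pull follow one rule: every reference taken with get_device() (often implicitly, inside a lookup helper) must be dropped with put_device() on every failure path, which in kernel style means goto labels that unwind in reverse order of acquisition. A minimal sketch of the idiom with hypothetical devices and init steps:

#include <linux/device.h>

static int step_a(void) { return 0; }	/* hypothetical init steps */
static int step_b(void) { return 0; }

static int init_with_unwind(struct device *mutex_dev, struct device *dma_dev)
{
	int ret;

	get_device(mutex_dev);			/* reference #1 */

	ret = step_a();
	if (ret)
		goto put_mutex_dev;

	get_device(dma_dev);			/* reference #2 */

	ret = step_b();
	if (ret)
		goto put_dma_dev;

	return 0;

	/*
	 * Unwind in reverse order of acquisition, so each failure path
	 * drops exactly the references taken up to that point.
	 */
put_dma_dev:
	put_device(dma_dev);
put_mutex_dev:
	put_device(mutex_dev);
	return ret;
}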
@@ -466,14 +466,13 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
 	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
 
 	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
-		horizontal_backporch_byte =
-			(vm->hback_porch * dsi_tmp_buf_bpp - 10);
+		horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
 	else
-		horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
-			dsi_tmp_buf_bpp - 10);
+		horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
+					    dsi_tmp_buf_bpp;
 
 	data_phy_cycles = timing->lpx + timing->da_hs_prepare +
-			  timing->da_hs_zero + timing->da_hs_exit + 3;
+			  timing->da_hs_zero + timing->da_hs_exit;
 
 	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
 		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
...
@@ -1507,25 +1507,30 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
 		dev_err(dev,
 			"Failed to get system configuration registers: %d\n",
 			ret);
-		return ret;
+		goto put_device;
 	}
 	hdmi->sys_regmap = regmap;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	hdmi->regs = devm_ioremap_resource(dev, mem);
-	if (IS_ERR(hdmi->regs))
-		return PTR_ERR(hdmi->regs);
+	if (IS_ERR(hdmi->regs)) {
+		ret = PTR_ERR(hdmi->regs);
+		goto put_device;
+	}
 
 	remote = of_graph_get_remote_node(np, 1, 0);
-	if (!remote)
-		return -EINVAL;
+	if (!remote) {
+		ret = -EINVAL;
+		goto put_device;
+	}
 
 	if (!of_device_is_compatible(remote, "hdmi-connector")) {
 		hdmi->next_bridge = of_drm_find_bridge(remote);
 		if (!hdmi->next_bridge) {
 			dev_err(dev, "Waiting for external bridge\n");
 			of_node_put(remote);
-			return -EPROBE_DEFER;
+			ret = -EPROBE_DEFER;
+			goto put_device;
 		}
 	}
@@ -1534,7 +1539,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
 		dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
 			remote);
 		of_node_put(remote);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto put_device;
 	}
 	of_node_put(remote);
 
@@ -1542,10 +1548,14 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
 	of_node_put(i2c_np);
 	if (!hdmi->ddc_adpt) {
 		dev_err(dev, "Failed to get ddc i2c adapter by node\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto put_device;
 	}
 
 	return 0;
+put_device:
+	put_device(hdmi->cec_dev);
+	return ret;
 }
 
 /*
...
@@ -933,7 +933,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
 
 	/* get matching reference and feedback divider */
 	*ref_div = min(max(den/post_div, 1u), ref_div_max);
-	*fb_div = max(nom * *ref_div * post_div / den, 1u);
+	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
 
 	/* limit fb divider to its maximum */
 	if (*fb_div > fb_div_max) {
...
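The revert restores round-to-nearest for the feedback divider: plain C division truncates toward zero, so it always errs low, while DIV_ROUND_CLOSEST() halves the worst-case frequency error. A worked example with made-up divider values (the macro shown is the kernel's expansion for unsigned operands):

#include <stdio.h>

/* Unsigned-case expansion of the kernel's DIV_ROUND_CLOSEST() */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	/* made-up values: nom * ref_div * post_div = 18284, den = 100 */
	unsigned int scaled = 18284, den = 100;

	printf("truncated fb_div: %u\n", scaled / den);                   /* 182 */
	printf("rounded fb_div:   %u\n", DIV_ROUND_CLOSEST(scaled, den)); /* 183 */

	/*
	 * 183 * 100 = 18300 is off by 16 from 18284, whereas
	 * 182 * 100 = 18200 is off by 84: rounding lands the PLL
	 * output closer to the requested clock.
	 */
	return 0;
}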