Commit adbe8a3c authored by Dave Airlie

Merge tag 'amd-drm-fixes-5.8-2020-07-15' of...

Merge tag 'amd-drm-fixes-5.8-2020-07-15' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

amd-drm-fixes-5.8-2020-07-15:

amdgpu:
- Fix a race condition with KIQ
- Preemption fix
- Fix handling of fake MST encoders
- OLED panel fix
- Handle allocation failure in stream construction
- Renoir SMC fix
- SDMA 5.x fix
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200715213914.3994-1-alexander.deucher@amd.com
parents 524d4f71 05051496
...@@ -1295,27 +1295,37 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched) ...@@ -1295,27 +1295,37 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{ {
struct amdgpu_job *job; struct amdgpu_job *job;
struct drm_sched_job *s_job; struct drm_sched_job *s_job, *tmp;
uint32_t preempt_seq; uint32_t preempt_seq;
struct dma_fence *fence, **ptr; struct dma_fence *fence, **ptr;
struct amdgpu_fence_driver *drv = &ring->fence_drv; struct amdgpu_fence_driver *drv = &ring->fence_drv;
struct drm_gpu_scheduler *sched = &ring->sched; struct drm_gpu_scheduler *sched = &ring->sched;
bool preempted = true;
if (ring->funcs->type != AMDGPU_RING_TYPE_GFX) if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
return; return;
preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2)); preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
if (preempt_seq <= atomic_read(&drv->last_seq)) if (preempt_seq <= atomic_read(&drv->last_seq)) {
return; preempted = false;
goto no_preempt;
}
preempt_seq &= drv->num_fences_mask; preempt_seq &= drv->num_fences_mask;
ptr = &drv->fences[preempt_seq]; ptr = &drv->fences[preempt_seq];
fence = rcu_dereference_protected(*ptr, 1); fence = rcu_dereference_protected(*ptr, 1);
no_preempt:
spin_lock(&sched->job_list_lock); spin_lock(&sched->job_list_lock);
list_for_each_entry(s_job, &sched->ring_mirror_list, node) { list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
/* remove job from ring_mirror_list */
list_del_init(&s_job->node);
sched->ops->free_job(s_job);
continue;
}
job = to_amdgpu_job(s_job); job = to_amdgpu_job(s_job);
if (job->fence == fence) if (preempted && job->fence == fence)
/* mark the job as preempted */ /* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED; job->preemption_status |= AMDGPU_IB_PREEMPTED;
} }
......
...@@ -7513,12 +7513,17 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) ...@@ -7513,12 +7513,17 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq; struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *kiq_ring = &kiq->ring; struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL; return -EINVAL;
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
spin_unlock_irqrestore(&kiq->ring_lock, flags);
return -ENOMEM; return -ENOMEM;
}
/* assert preemption condition */ /* assert preemption condition */
amdgpu_ring_set_preempt_cond_exec(ring, false); amdgpu_ring_set_preempt_cond_exec(ring, false);
...@@ -7529,6 +7534,8 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) ...@@ -7529,6 +7534,8 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
++ring->trail_seq); ++ring->trail_seq);
amdgpu_ring_commit(kiq_ring); amdgpu_ring_commit(kiq_ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
/* poll the trailing fence */ /* poll the trailing fence */
for (i = 0; i < adev->usec_timeout; i++) { for (i = 0; i < adev->usec_timeout; i++) {
if (ring->trail_seq == if (ring->trail_seq ==
......
...@@ -314,30 +314,20 @@ static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring) ...@@ -314,30 +314,20 @@ static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring) static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
u64 *wptr = NULL; u64 wptr;
uint64_t local_wptr = 0;
if (ring->use_doorbell) { if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */ /* XXX check if swapping is necessary on BE */
wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]); wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr); DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
*wptr = (*wptr) >> 2;
DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
} else { } else {
u32 lowbit, highbit; wptr = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
wptr = wptr << 32;
wptr = &local_wptr; wptr |= RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2; DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
ring->me, highbit, lowbit);
*wptr = highbit;
*wptr = (*wptr) << 32;
*wptr |= lowbit;
} }
return *wptr; return wptr >> 2;
} }
/** /**
......
...@@ -974,6 +974,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) ...@@ -974,6 +974,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* Update the actual used number of crtc */ /* Update the actual used number of crtc */
adev->mode_info.num_crtc = adev->dm.display_indexes_num; adev->mode_info.num_crtc = adev->dm.display_indexes_num;
/* create fake encoders for MST */
dm_dp_create_fake_mst_encoders(adev);
/* TODO: Add_display_info? */ /* TODO: Add_display_info? */
/* TODO use dynamic cursor width */ /* TODO use dynamic cursor width */
...@@ -997,6 +1000,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) ...@@ -997,6 +1000,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
static void amdgpu_dm_fini(struct amdgpu_device *adev) static void amdgpu_dm_fini(struct amdgpu_device *adev)
{ {
int i;
for (i = 0; i < adev->dm.display_indexes_num; i++) {
drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
}
amdgpu_dm_audio_fini(adev); amdgpu_dm_audio_fini(adev);
amdgpu_dm_destroy_drm_device(&adev->dm); amdgpu_dm_destroy_drm_device(&adev->dm);
...@@ -2010,6 +2019,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) ...@@ -2010,6 +2019,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
struct amdgpu_display_manager *dm; struct amdgpu_display_manager *dm;
struct drm_connector *conn_base; struct drm_connector *conn_base;
struct amdgpu_device *adev; struct amdgpu_device *adev;
struct dc_link *link = NULL;
static const u8 pre_computed_values[] = { static const u8 pre_computed_values[] = {
50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69, 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98}; 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
...@@ -2017,6 +2027,10 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) ...@@ -2017,6 +2027,10 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
if (!aconnector || !aconnector->dc_link) if (!aconnector || !aconnector->dc_link)
return; return;
link = aconnector->dc_link;
if (link->connector_signal != SIGNAL_TYPE_EDP)
return;
conn_base = &aconnector->base; conn_base = &aconnector->base;
adev = conn_base->dev->dev_private; adev = conn_base->dev->dev_private;
dm = &adev->dm; dm = &adev->dm;
......
...@@ -43,6 +43,9 @@ ...@@ -43,6 +43,9 @@
*/ */
#define AMDGPU_DM_MAX_DISPLAY_INDEX 31 #define AMDGPU_DM_MAX_DISPLAY_INDEX 31
#define AMDGPU_DM_MAX_CRTC 6
/* /*
#include "include/amdgpu_dal_power_if.h" #include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h" #include "amdgpu_dm_irq.h"
...@@ -328,6 +331,13 @@ struct amdgpu_display_manager { ...@@ -328,6 +331,13 @@ struct amdgpu_display_manager {
* available in FW * available in FW
*/ */
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
/**
* @mst_encoders:
*
* fake encoders used for DP MST.
*/
struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
}; };
struct amdgpu_dm_connector { struct amdgpu_dm_connector {
...@@ -356,7 +366,6 @@ struct amdgpu_dm_connector { ...@@ -356,7 +366,6 @@ struct amdgpu_dm_connector {
struct amdgpu_dm_dp_aux dm_dp_aux; struct amdgpu_dm_dp_aux dm_dp_aux;
struct drm_dp_mst_port *port; struct drm_dp_mst_port *port;
struct amdgpu_dm_connector *mst_port; struct amdgpu_dm_connector *mst_port;
struct amdgpu_encoder *mst_encoder;
struct drm_dp_aux *dsc_aux; struct drm_dp_aux *dsc_aux;
/* TODO see if we can merge with ddc_bus or make a dm_connector */ /* TODO see if we can merge with ddc_bus or make a dm_connector */
......
...@@ -95,7 +95,6 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) ...@@ -95,7 +95,6 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
{ {
struct amdgpu_dm_connector *aconnector = struct amdgpu_dm_connector *aconnector =
to_amdgpu_dm_connector(connector); to_amdgpu_dm_connector(connector);
struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder;
if (aconnector->dc_sink) { if (aconnector->dc_sink) {
dc_link_remove_remote_sink(aconnector->dc_link, dc_link_remove_remote_sink(aconnector->dc_link,
...@@ -105,8 +104,6 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) ...@@ -105,8 +104,6 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(aconnector->edid); kfree(aconnector->edid);
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
drm_connector_cleanup(connector); drm_connector_cleanup(connector);
drm_dp_mst_put_port_malloc(aconnector->port); drm_dp_mst_put_port_malloc(aconnector->port);
kfree(aconnector); kfree(aconnector);
...@@ -243,7 +240,11 @@ static struct drm_encoder * ...@@ -243,7 +240,11 @@ static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector, dm_mst_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *connector_state) struct drm_connector_state *connector_state)
{ {
return &to_amdgpu_dm_connector(connector)->mst_encoder->base; struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);
return &adev->dm.mst_encoders[acrtc->crtc_id].base;
} }
static int static int
...@@ -306,31 +307,27 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { ...@@ -306,31 +307,27 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
.destroy = amdgpu_dm_encoder_destroy, .destroy = amdgpu_dm_encoder_destroy,
}; };
static struct amdgpu_encoder * void
dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector) dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
{ {
struct drm_device *dev = connector->base.dev; struct drm_device *dev = adev->ddev;
struct amdgpu_device *adev = dev->dev_private; int i;
struct amdgpu_encoder *amdgpu_encoder;
struct drm_encoder *encoder;
amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
if (!amdgpu_encoder)
return NULL;
encoder = &amdgpu_encoder->base; for (i = 0; i < adev->dm.display_indexes_num; i++) {
encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
struct drm_encoder *encoder = &amdgpu_encoder->base;
drm_encoder_init( encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
dev,
&amdgpu_encoder->base,
&amdgpu_dm_encoder_funcs,
DRM_MODE_ENCODER_DPMST,
NULL);
drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); drm_encoder_init(
dev,
&amdgpu_encoder->base,
&amdgpu_dm_encoder_funcs,
DRM_MODE_ENCODER_DPMST,
NULL);
return amdgpu_encoder; drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
}
} }
static struct drm_connector * static struct drm_connector *
...@@ -343,6 +340,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, ...@@ -343,6 +340,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector; struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector; struct drm_connector *connector;
int i;
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
if (!aconnector) if (!aconnector)
...@@ -369,9 +367,10 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, ...@@ -369,9 +367,10 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
master->dc_link, master->dc_link,
master->connector_id); master->connector_id);
aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master); for (i = 0; i < adev->dm.display_indexes_num; i++) {
drm_connector_attach_encoder(&aconnector->base, drm_connector_attach_encoder(&aconnector->base,
&aconnector->mst_encoder->base); &adev->dm.mst_encoders[i].base);
}
connector->max_bpc_property = master->base.max_bpc_property; connector->max_bpc_property = master->base.max_bpc_property;
if (connector->max_bpc_property) if (connector->max_bpc_property)
......
...@@ -35,6 +35,9 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, ...@@ -35,6 +35,9 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector, struct amdgpu_dm_connector *aconnector,
int link_index); int link_index);
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_DC_DCN)
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
struct dc_state *dc_state); struct dc_state *dc_state);
......
...@@ -56,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink) ...@@ -56,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
} }
} }
static void dc_stream_construct(struct dc_stream_state *stream, static bool dc_stream_construct(struct dc_stream_state *stream,
struct dc_sink *dc_sink_data) struct dc_sink *dc_sink_data)
{ {
uint32_t i = 0; uint32_t i = 0;
...@@ -118,11 +118,17 @@ static void dc_stream_construct(struct dc_stream_state *stream, ...@@ -118,11 +118,17 @@ static void dc_stream_construct(struct dc_stream_state *stream,
update_stream_signal(stream, dc_sink_data); update_stream_signal(stream, dc_sink_data);
stream->out_transfer_func = dc_create_transfer_func(); stream->out_transfer_func = dc_create_transfer_func();
if (stream->out_transfer_func == NULL) {
dc_sink_release(dc_sink_data);
return false;
}
stream->out_transfer_func->type = TF_TYPE_BYPASS; stream->out_transfer_func->type = TF_TYPE_BYPASS;
stream->out_transfer_func->ctx = stream->ctx; stream->out_transfer_func->ctx = stream->ctx;
stream->stream_id = stream->ctx->dc_stream_id_count; stream->stream_id = stream->ctx->dc_stream_id_count;
stream->ctx->dc_stream_id_count++; stream->ctx->dc_stream_id_count++;
return true;
} }
static void dc_stream_destruct(struct dc_stream_state *stream) static void dc_stream_destruct(struct dc_stream_state *stream)
...@@ -164,13 +170,20 @@ struct dc_stream_state *dc_create_stream_for_sink( ...@@ -164,13 +170,20 @@ struct dc_stream_state *dc_create_stream_for_sink(
stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL); stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
if (stream == NULL) if (stream == NULL)
return NULL; goto alloc_fail;
dc_stream_construct(stream, sink); if (dc_stream_construct(stream, sink) == false)
goto construct_fail;
kref_init(&stream->refcount); kref_init(&stream->refcount);
return stream; return stream;
construct_fail:
kfree(stream);
alloc_fail:
return NULL;
} }
struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
......
...@@ -689,7 +689,7 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u ...@@ -689,7 +689,7 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
return -EINVAL; return -EINVAL;
} }
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
1 << workload_type, 1 << workload_type,
NULL); NULL);
if (ret) { if (ret) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment