Commit 70470154 authored by Andrey Grodzovsky, committed by Alex Deucher

drm/amd/display: Get rid of separate flip function.

This code is a remnant of the pre-atomic age, when flip was a
standalone IOCTL.
Signed-off-by: Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3f6d7435
@@ -1328,102 +1328,6 @@ static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
	return 0;
}

/******************************************************************************
 * Page Flip functions
 ******************************************************************************/

/**
 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
 *		  via a DRM IOCTL from user mode.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (surface address update).
 */
static void dm_page_flip(struct amdgpu_device *adev,
			 int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *acrtc;
	const struct dc_stream *stream;
	struct dc_flip_addrs addr = { {0} };
	struct dc_surface_update surface_updates[1] = { {0} };

	/*
	 * TODO risk of concurrency issues
	 *
	 * This should be guarded by the dal_mutex, but we can't do that since
	 * the caller uses a spin_lock on event_lock.
	 *
	 * If we wait on the dal_mutex a second page flip interrupt might come,
	 * spin on the event_lock, disabling interrupts while it does so. At
	 * this point the core can no longer be pre-empted and return to the
	 * thread that waited on the dal_mutex and we're deadlocked.
	 *
	 * With multiple cores the same essentially happens but might just take
	 * a little longer to lock up all cores.
	 *
	 * The reason we should lock on dal_mutex is so that we can be sure
	 * nobody messes with acrtc->stream after we read and check its value.
	 *
	 * We might be able to fix our concurrency issues with a work queue
	 * where we schedule all work items (mode_set, page_flip, etc.) and
	 * execute them one by one. Care needs to be taken to still deal with
	 * any potential concurrency issues arising from interrupt calls.
	 */

	acrtc = adev->mode_info.crtcs[crtc_id];
	stream = acrtc->stream;

	if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_ERROR("flip queue: acrtc %d, already busy\n", acrtc->crtc_id);
		/* In commit tail framework this cannot happen */
		BUG_ON(0);
	}

	/*
	 * Received a page flip call after the display has been reset.
	 * Just return in this case. Everything should be cleaned up on reset.
	 */
	if (!stream) {
		WARN_ON(1);
		return;
	}

	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
	addr.flip_immediate = async;

	if (acrtc->base.state->event &&
	    acrtc->base.state->event->event.base.type ==
	    DRM_EVENT_FLIP_COMPLETE) {
		acrtc->event = acrtc->base.state->event;

		/* Set the flip status */
		acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

		/* Mark this event as consumed */
		acrtc->base.state->event = NULL;
	}

	surface_updates->surface = dc_stream_get_status(stream)->surfaces[0];
	surface_updates->flip_addr = &addr;
	dc_update_surfaces_for_stream(adev->dm.dc, surface_updates, 1, stream);

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);
}

static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				  struct drm_file *filp)
{
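The TODO comment in the removed function proposes serializing display work through a work queue rather than programming flips under event_lock. Below is a minimal sketch of that idea, assuming an ordered (one-item-at-a-time) workqueue; the names dm_flip_work, dm_flip_wq and dm_queue_flip are hypothetical and not part of this driver:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct dm_flip_work {
	struct work_struct work;
	struct amdgpu_device *adev;
	int crtc_id;
	u64 crtc_base;
	bool async;
};

/* Created once at init: dm_flip_wq = alloc_ordered_workqueue("dm_flip", 0); */
static struct workqueue_struct *dm_flip_wq;

static void dm_flip_work_func(struct work_struct *work)
{
	struct dm_flip_work *fw = container_of(work, struct dm_flip_work, work);

	/*
	 * Runs in process context: taking dal_mutex here cannot hit the
	 * event_lock interrupt deadlock described in the TODO above.
	 */
	dm_page_flip(fw->adev, fw->crtc_id, fw->crtc_base, fw->async);
	kfree(fw);
}

static int dm_queue_flip(struct amdgpu_device *adev, int crtc_id,
			 u64 crtc_base, bool async)
{
	/* GFP_ATOMIC: this may be called with event_lock held. */
	struct dm_flip_work *fw = kzalloc(sizeof(*fw), GFP_ATOMIC);

	if (!fw)
		return -ENOMEM;

	fw->adev = adev;
	fw->crtc_id = crtc_id;
	fw->crtc_base = crtc_base;
	fw->async = async;
	INIT_WORK(&fw->work, dm_flip_work_func);

	/*
	 * An ordered workqueue executes items strictly one after another,
	 * which is exactly the serialization the TODO asks for.
	 */
	queue_work(dm_flip_wq, &fw->work);
	return 0;
}

Care would still be needed for work items racing with the flip-done interrupt handler, as the TODO itself notes.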
@@ -1460,7 +1364,6 @@ static const struct amdgpu_display_funcs dm_display_funcs = {
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
......
@@ -2198,6 +2198,8 @@ static void amdgpu_dm_do_flip(
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	bool async_flip = (acrtc->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
	struct dc_flip_addrs addr = { {0} };
	struct dc_surface_update surface_updates[1] = { {0} };

	/* Prepare wait for target vblank early - before the fence-waits */
	target_vblank = target - drm_crtc_vblank_count(crtc) +
@@ -2211,7 +2213,7 @@ static void amdgpu_dm_do_flip(
	r = amdgpu_bo_reserve(abo, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve buffer before flip\n");
		BUG_ON(0);
		WARN_ON(1);
	}

	/* Wait for all fences on this FB */
@@ -2239,8 +2241,37 @@ static void amdgpu_dm_do_flip(
	/* update crtc fb */
	crtc->primary->fb = fb;

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, acrtc->crtc_id, afb->address, async_flip);
	WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
	WARN_ON(!acrtc->stream);

	addr.address.grph.addr.low_part = lower_32_bits(afb->address);
	addr.address.grph.addr.high_part = upper_32_bits(afb->address);
	addr.flip_immediate = async_flip;

	if (acrtc->base.state->event &&
	    acrtc->base.state->event->event.base.type ==
	    DRM_EVENT_FLIP_COMPLETE) {
		acrtc->event = acrtc->base.state->event;

		/* Set the flip status */
		acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

		/* Mark this event as consumed */
		acrtc->base.state->event = NULL;
	}

	surface_updates->surface = dc_stream_get_status(acrtc->stream)->surfaces[0];
	surface_updates->flip_addr = &addr;
	dc_update_surfaces_for_stream(adev->dm.dc, surface_updates, 1, acrtc->stream);

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);

	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
......
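For context on the DRM_EVENT_FLIP_COMPLETE handling above: the event that amdgpu_dm_do_flip() stashes in acrtc->event is delivered to userspace once the flip completes, where it is consumed through the standard libdrm event loop. A hedged sketch of that consumer side using plain libdrm (this is the generic DRM API, not part of this commit; flip_and_wait is an illustrative name):

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void page_flip_handler(int fd, unsigned int sequence,
			      unsigned int tv_sec, unsigned int tv_usec,
			      void *user_data)
{
	/* Called by drmHandleEvent() when DRM_EVENT_FLIP_COMPLETE arrives. */
	printf("flip complete, vblank sequence %u\n", sequence);
}

static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = page_flip_handler,
	};
	int ret;

	/*
	 * Request a completion event; on the kernel side above, the flip is
	 * marked AMDGPU_FLIP_SUBMITTED until the flip interrupt fires.
	 */
	ret = drmModePageFlip(fd, crtc_id, fb_id,
			      DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (ret)
		return ret;

	/* Blocks until the kernel sends DRM_EVENT_FLIP_COMPLETE. */
	return drmHandleEvent(fd, &evctx);
}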