Commit 54f5499a authored by Andrey Grodzovsky, committed by Alex Deucher

drm/amd/display: Refactor atomic commit implementation. (v2)

Modify amdgpu_dm_atomic_commit to implement the
atomic_commit_tail hook.
Unify buffer object allocation and deallocation
for surface updates and page flips.
Simplify the wait-for-fences and target-vblank logic
for non-blocking commits.
Remove the hacky update-surface to page-flip synchronization
we had and rely on the atomic framework's synchronization logic.

v2:
Add state->allow_modeset as an indicator of a page flip call.
Signed-off-by: Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
Acked-by: Harry Wentland <Harry.Wentland@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 92a65e32
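For context, the shape of this refactor is the standard DRM commit-tail pattern: the driver points .atomic_commit at the generic drm_atomic_helper_commit() and only supplies the hardware-programming tail. The sketch below is illustrative only; the my_* names are hypothetical and not part of this patch, but the helper calls are the ones the patch itself uses.

/*
 * Minimal sketch of the commit-tail wiring this patch adopts. The helper
 * library handles prepare/swap/fencing and blocking vs. non-blocking
 * commits; the driver programs hardware in its atomic_commit_tail hook.
 */
#include <drm/drm_atomic_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

static void my_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	/* State is already swapped by the time the helper calls us. */
	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	/* ... program CRTCs/planes here, as amdgpu_dm_atomic_commit_tail does ... */

	drm_atomic_helper_commit_hw_done(state);	/* signal HW programming done */
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);	/* release old framebuffers */
}

static const struct drm_mode_config_funcs my_mode_funcs = {
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,	/* generic helper */
};

static const struct drm_mode_config_helper_funcs my_mode_helper_funcs = {
	.atomic_commit_tail = my_atomic_commit_tail,	/* driver-specific tail */
};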
@@ -149,7 +149,6 @@ static struct amdgpu_crtc *get_crtc_by_otg_inst(
 static void dm_pflip_high_irq(void *interrupt_params)
 {
-	struct amdgpu_flip_work *works;
 	struct amdgpu_crtc *amdgpu_crtc;
 	struct common_irq_params *irq_params = interrupt_params;
 	struct amdgpu_device *adev = irq_params->adev;
@@ -165,7 +164,6 @@ static void dm_pflip_high_irq(void *interrupt_params)
 	}
 
 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
-	works = amdgpu_crtc->pflip_works;
 
 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
@@ -177,22 +175,24 @@ static void dm_pflip_high_irq(void *interrupt_params)
 		return;
 	}
 
-	/* page flip completed. clean up */
-	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
-	amdgpu_crtc->pflip_works = NULL;
-
 	/* wake up userspace */
-	if (works->event)
-		drm_crtc_send_vblank_event(&amdgpu_crtc->base,
-					   works->event);
+	if (amdgpu_crtc->event
+			&& amdgpu_crtc->event->event.base.type
+			== DRM_EVENT_FLIP_COMPLETE) {
+		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
+		/* page flip completed. clean up */
+		amdgpu_crtc->event = NULL;
+	} else
+		WARN_ON(1);
+
+	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
 
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
-	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE, work: %p,\n",
-			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc, works);
+	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
+			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
 
 	drm_crtc_vblank_put(&amdgpu_crtc->base);
-	schedule_work(&works->unpin_work);
 }
 
 static void dm_crtc_high_irq(void *interrupt_params)
@@ -719,7 +719,11 @@ static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 	.fb_create = amdgpu_user_framebuffer_create,
 	.output_poll_changed = amdgpu_output_poll_changed,
 	.atomic_check = amdgpu_dm_atomic_check,
-	.atomic_commit = amdgpu_dm_atomic_commit
+	.atomic_commit = drm_atomic_helper_commit
+};
+
+static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
+	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
 };
 
 void amdgpu_dm_update_connector_after_detect(
@@ -1092,6 +1096,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 	adev->mode_info.mode_config_initialized = true;
 
 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
 	adev->ddev->mode_config.max_width = 16384;
 	adev->ddev->mode_config.max_height = 16384;
@@ -1345,6 +1350,14 @@ static void dm_page_flip(struct amdgpu_device *adev,
 	acrtc = adev->mode_info.crtcs[crtc_id];
 	stream = acrtc->stream;
 
+	if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
+		DRM_ERROR("flip queue: acrtc %d, already busy\n", acrtc->crtc_id);
+		/* In commit tail framework this cannot happen */
+		BUG_ON(0);
+	}
+
 	/*
 	 * Received a page flip call after the display has been reset.
 	 * Just return in this case. Everything should be clean-up on reset.
@@ -1359,15 +1372,28 @@ static void dm_page_flip(struct amdgpu_device *adev,
 	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
 	addr.flip_immediate = async;
 
+	if (acrtc->base.state->event &&
+	    acrtc->base.state->event->event.base.type ==
+	    DRM_EVENT_FLIP_COMPLETE) {
+		acrtc->event = acrtc->base.state->event;
+
+		/* Set the flip status */
+		acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+
+		/* Mark this event as consumed */
+		acrtc->base.state->event = NULL;
+	}
+
+	dc_flip_surface_addrs(adev->dm.dc,
+			dc_stream_get_status(stream)->surfaces,
+			&addr, 1);
+
 	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
 			 __func__,
 			 addr.address.grph.addr.high_part,
 			 addr.address.grph.addr.low_part);
-
-	dc_flip_surface_addrs(
-			adev->dm.dc,
-			dc_stream_get_status(stream)->surfaces,
-			&addr, 1);
 }
 
 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
......
@@ -687,8 +687,18 @@ static void dm_dc_surface_commit(
 {
 	struct dc_surface *dc_surface;
 	const struct dc_surface *dc_surfaces[1];
-	const struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 	const struct dc_stream *dc_stream = acrtc->stream;
+	unsigned long flags;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
+		DRM_ERROR("dm_dc_surface_commit: acrtc %d, already busy\n", acrtc->crtc_id);
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		/* In commit tail framework this cannot happen */
+		BUG_ON(0);
+	}
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 
 	if (!dc_stream) {
 		dm_error(
@@ -1427,6 +1437,7 @@ static void clear_unrelated_fields(struct drm_plane_state *state)
 	state->fence = NULL;
 }
 
+/* TODO: update because event is always present now */
 static bool page_flip_needed(
 		const struct drm_plane_state *new_state,
 		const struct drm_plane_state *old_state,
@@ -1482,11 +1493,13 @@ static bool page_flip_needed(
 	page_flip_required = memcmp(&old_state_tmp,
 				    &new_state_tmp,
 				    sizeof(old_state_tmp)) == 0 ? true:false;
+
 	if (new_state->crtc && page_flip_required == false) {
 		acrtc_new = to_amdgpu_crtc(new_state->crtc);
 		if (acrtc_new->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
 			page_flip_required = true;
 	}
+
 	return page_flip_required;
 }
@@ -1512,7 +1525,7 @@ static int dm_plane_helper_prepare_fb(
 	if (unlikely(r != 0))
 		return r;
 
-	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
+	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
 	amdgpu_bo_unreserve(rbo);
@@ -1521,6 +1534,7 @@ static int dm_plane_helper_prepare_fb(
 		return r;
 	}
 
+	amdgpu_bo_ref(rbo);
 	return 0;
 }
@@ -1544,7 +1558,10 @@ static void dm_plane_helper_cleanup_fb(
 	} else {
 		amdgpu_bo_unpin(rbo);
 		amdgpu_bo_unreserve(rbo);
+		amdgpu_bo_unref(&rbo);
 	}
+
+	afb->address = 0;
 }
 
 int dm_create_validation_set_for_connector(struct drm_connector *connector,
@@ -2135,51 +2152,6 @@ static enum dm_commit_action get_dm_commit_action(struct drm_crtc_state *state)
 	}
 }
 
-typedef bool (*predicate)(struct amdgpu_crtc *acrtc);
-
-static void wait_while_pflip_status(struct amdgpu_device *adev,
-		struct amdgpu_crtc *acrtc, predicate f) {
-	int count = 0;
-
-	while (f(acrtc)) {
-		/* Spin Wait*/
-		msleep(1);
-		count++;
-		if (count == 1000) {
-			DRM_ERROR("%s - crtc:%d[%p], pflip_stat:%d, probable hang!\n",
-					__func__, acrtc->crtc_id,
-					acrtc,
-					acrtc->pflip_status);
-
-			/* we do not expect to hit this case except on Polaris with PHY PLL
-			 * 1. DP to HDMI passive dongle connected
-			 * 2. unplug (headless)
-			 * 3. plug in DP
-			 * 3a. on plug in, DP will try verify link by training, and training
-			 * would disable PHY PLL which HDMI rely on to drive TG
-			 * 3b. this will cause flip interrupt cannot be generated, and we
-			 * exit when timeout expired. however we do not have code to clean
-			 * up flip, flip clean up will happen when the address is written
-			 * with the restore mode change
-			 */
-			WARN_ON(1);
-			break;
-		}
-	}
-
-	DRM_DEBUG_DRIVER("%s - Finished waiting for:%d msec, crtc:%d[%p], pflip_stat:%d \n",
-			__func__,
-			count,
-			acrtc->crtc_id,
-			acrtc,
-			acrtc->pflip_status);
-}
-
-static bool pflip_in_progress_predicate(struct amdgpu_crtc *acrtc)
-{
-	return acrtc->pflip_status != AMDGPU_FLIP_NONE;
-}
-
 static void manage_dm_interrupts(
 		struct amdgpu_device *adev,
 		struct amdgpu_crtc *acrtc,
@@ -2201,8 +2173,6 @@ static void manage_dm_interrupts(
 				&adev->pageflip_irq,
 				irq_type);
 	} else {
-		wait_while_pflip_status(adev, acrtc,
-				pflip_in_progress_predicate);
 
 		amdgpu_irq_put(
 				adev,
@@ -2212,12 +2182,6 @@ static void manage_dm_interrupts(
 	}
 }
 
-static bool pflip_pending_predicate(struct amdgpu_crtc *acrtc)
-{
-	return acrtc->pflip_status == AMDGPU_FLIP_PENDING;
-}
-
 static bool is_scaling_state_different(
 		const struct dm_connector_state *dm_state,
 		const struct dm_connector_state *old_dm_state)
@@ -2255,116 +2219,94 @@ static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc)
 	acrtc->enabled = false;
 }
 
-int amdgpu_dm_atomic_commit(
-		struct drm_device *dev,
-		struct drm_atomic_state *state,
-		bool nonblock)
+/*
+ * Executes flip
+ *
+ * Waits on all BO's fences and for proper vblank count
+ */
+static void amdgpu_dm_do_flip(
+		struct drm_crtc *crtc,
+		struct drm_framebuffer *fb,
+		uint32_t target)
+{
+	unsigned long flags;
+	uint32_t target_vblank;
+	int r, vpos, hpos;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	bool async_flip = (acrtc->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+
+	/* TODO: This might fail and hence is better not used; wait
+	 * explicitly on fences instead, and in general this should be
+	 * called for blocking commits too, as per the framework helpers
+	 */
+	r = amdgpu_bo_reserve(abo, true);
+	if (unlikely(r != 0)) {
+		DRM_ERROR("failed to reserve buffer before flip\n");
+		BUG_ON(0);
+	}
+
+	/* Wait for all fences on this FB */
+	WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
+						    MAX_SCHEDULE_TIMEOUT) < 0);
+
+	amdgpu_bo_unreserve(abo);
+
+	/* Wait for target vblank */
+	/* Wait until we're out of the vertical blank period before the one
+	 * targeted by the flip
+	 */
+	target_vblank = target - drm_crtc_vblank_count(crtc) +
+			amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
+
+	while ((acrtc->enabled &&
+		(amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
+					    &vpos, &hpos, NULL, NULL,
+					    &crtc->hwmode)
+		 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+		(int)(target_vblank -
+		 amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
+		usleep_range(1000, 1100);
+	}
+
+	/* Flip */
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	/* update crtc fb */
+	crtc->primary->fb = fb;
+
+	/* Do the flip (mmio) */
+	adev->mode_info.funcs->page_flip(adev, acrtc->crtc_id, afb->address, async_flip);
+
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+			 acrtc->crtc_id);
+}
+
+void amdgpu_dm_atomic_commit_tail(
+		struct drm_atomic_state *state)
 {
+	struct drm_device *dev = state->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_display_manager *dm = &adev->dm;
 	struct drm_plane *plane;
-	struct drm_plane_state *new_plane_state;
 	struct drm_plane_state *old_plane_state;
 	uint32_t i;
-	int32_t ret = 0;
 	uint32_t commit_streams_count = 0;
 	uint32_t new_crtcs_count = 0;
-	uint32_t flip_crtcs_count = 0;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state;
 	const struct dc_stream *commit_streams[MAX_STREAMS];
 	struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
 	const struct dc_stream *new_stream;
-	struct drm_crtc *flip_crtcs[MAX_STREAMS];
-	struct amdgpu_flip_work *work[MAX_STREAMS] = {0};
-	struct amdgpu_bo *new_abo[MAX_STREAMS] = {0};
-
-	/* In this step all new fb would be pinned */
-
-	/*
-	 * TODO: Revisit when we support true asynchronous commit.
-	 * Right now we receive async commit only from pageflip, in which case
-	 * we should not pin/unpin the fb here, it should be done in
-	 * amdgpu_crtc_flip and from the vblank irq handler.
-	 */
-	if (!nonblock) {
-		ret = drm_atomic_helper_prepare_planes(dev, state);
-		if (ret)
-			return ret;
-	}
-
-	/* Page flip if needed */
-	for_each_plane_in_state(state, plane, new_plane_state, i) {
-		struct drm_plane_state *old_plane_state = plane->state;
-		struct drm_crtc *crtc = new_plane_state->crtc;
-		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-		struct drm_framebuffer *fb = new_plane_state->fb;
-		struct drm_crtc_state *crtc_state;
-
-		if (!fb || !crtc)
-			continue;
-
-		crtc_state = drm_atomic_get_crtc_state(state, crtc);
-		if (!crtc_state->planes_changed || !crtc_state->active)
-			continue;
-
-		if (page_flip_needed(
-				new_plane_state,
-				old_plane_state,
-				crtc_state->event,
-				false)) {
-			ret = amdgpu_crtc_prepare_flip(crtc,
-					fb,
-					crtc_state->event,
-					acrtc->flip_flags,
-					drm_crtc_vblank_count(crtc),
-					&work[flip_crtcs_count],
-					&new_abo[flip_crtcs_count]);
-			if (ret) {
-				/* According to atomic_commit hook API, EINVAL is not allowed */
-				if (unlikely(ret == -EINVAL))
-					ret = -ENOMEM;
-
-				DRM_ERROR("Atomic commit: Flip for crtc id %d: [%p], "
-						"failed, errno = %d\n",
-						acrtc->crtc_id,
-						acrtc,
-						ret);
-				/* cleanup all flip configurations which
-				 * succeeded in this commit
-				 */
-				for (i = 0; i < flip_crtcs_count; i++)
-					amdgpu_crtc_cleanup_flip_ctx(
-							work[i],
-							new_abo[i]);
-
-				return ret;
-			}
-
-			flip_crtcs[flip_crtcs_count] = crtc;
-			flip_crtcs_count++;
-		}
-	}
-
-	/*
-	 * This is the point of no return - everything below never fails except
-	 * when the hw goes bonghits. Which means we can commit the new state on
-	 * the software side now.
-	 */
-
-	drm_atomic_helper_swap_state(state, true);
-
-	/*
-	 * From this point state become old state really. New state is
-	 * initialized to appropriate objects and could be accessed from there
-	 */
-
-	/*
-	 * there is no fences usage yet in state. We can skip the following line
-	 * wait_for_fences(dev, state);
-	 */
+	unsigned long flags;
+	bool wait_for_vblank = true;
 
 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
@@ -2520,11 +2462,11 @@ int amdgpu_dm_atomic_commit(
 	for_each_plane_in_state(state, plane, old_plane_state, i) {
 		struct drm_plane_state *plane_state = plane->state;
 		struct drm_crtc *crtc = plane_state->crtc;
-		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 		struct drm_framebuffer *fb = plane_state->fb;
 		struct drm_connector *connector;
 		struct dm_connector_state *dm_state = NULL;
 		enum dm_commit_action action;
+		bool pflip_needed;
 
 		if (!fb || !crtc || !crtc->state->active)
 			continue;
@@ -2535,12 +2477,14 @@ int amdgpu_dm_atomic_commit(
 		 * 1. This commit is not a page flip.
 		 * 2. This commit is a page flip, and streams are created.
 		 */
-		if (!page_flip_needed(
+		pflip_needed = !state->allow_modeset &&
+			page_flip_needed(
 				plane_state,
 				old_plane_state,
-				crtc->state->event, true) ||
-				action == DM_COMMIT_ACTION_DPMS_ON ||
-				action == DM_COMMIT_ACTION_SET) {
+				crtc->state->event, true);
+
+		if (!pflip_needed ||
+		    action == DM_COMMIT_ACTION_DPMS_ON ||
+		    action == DM_COMMIT_ACTION_SET) {
 			list_for_each_entry(connector,
 				&dev->mode_config.connector_list, head) {
 				if (connector->state->crtc == crtc) {
@@ -2567,14 +2511,6 @@ int amdgpu_dm_atomic_commit(
 			if (!dm_state)
 				continue;
 
-			/*
-			 * if flip is pending (ie, still waiting for fence to return
-			 * before address is submitted) here, we cannot commit_surface
-			 * as commit_surface will pre-maturely write out the future
-			 * address. wait until flip is submitted before proceeding.
-			 */
-			wait_while_pflip_status(adev, acrtc, pflip_pending_predicate);
-
 			dm_dc_surface_commit(dm->dc, crtc);
 		}
 	}
@@ -2594,43 +2530,77 @@ int amdgpu_dm_atomic_commit(
 		}
 	}
 
-	/* Do actual flip */
-	flip_crtcs_count = 0;
 	for_each_plane_in_state(state, plane, old_plane_state, i) {
 		struct drm_plane_state *plane_state = plane->state;
 		struct drm_crtc *crtc = plane_state->crtc;
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 		struct drm_framebuffer *fb = plane_state->fb;
+		bool pflip_needed;
 
 		if (!fb || !crtc || !crtc->state->planes_changed ||
 		    !crtc->state->active)
 			continue;
-		if (page_flip_needed(
+
+		pflip_needed = !state->allow_modeset &&
+			page_flip_needed(
 				plane_state,
 				old_plane_state,
 				crtc->state->event,
-				false)) {
-			amdgpu_crtc_submit_flip(
-					crtc,
-					fb,
-					work[flip_crtcs_count],
-					new_abo[i]);
-			flip_crtcs_count++;
+				false);
+
+		if (pflip_needed) {
+			amdgpu_dm_do_flip(
+					crtc,
+					fb,
+					drm_crtc_vblank_count(crtc));
+
+			wait_for_vblank =
+				acrtc->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
+				false : true;
 			/* clean up the flags for next usage */
 			acrtc->flip_flags = 0;
 		}
 	}
 
-	/* In this state all old framebuffers would be unpinned */
-
-	/* TODO: Revisit when we support true asynchronous commit.*/
-	if (!nonblock)
-		drm_atomic_helper_cleanup_planes(dev, state);
-
-	drm_atomic_state_put(state);
-
-	return ret;
+	/* TODO: mark consumed event on all crtc assigned event
+	 * in drm_atomic_helper_setup_commit just to signal completion
+	 */
+	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+		if (acrtc->base.state->event &&
+		    acrtc->base.state->event->event.base.type != DRM_EVENT_FLIP_COMPLETE) {
+			acrtc->event = acrtc->base.state->event;
+			acrtc->base.state->event = NULL;
+		}
+	}
+	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+	/* Signal HW programming completion */
+	drm_atomic_helper_commit_hw_done(state);
+
+	if (wait_for_vblank)
+		drm_atomic_helper_wait_for_vblanks(dev, state);
+
+	/* TODO: send vblank event on all crtc assigned event
+	 * in drm_atomic_helper_setup_commit just to signal completion
+	 */
+	spin_lock_irqsave(&adev->ddev->event_lock, flags);
+	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+		if (acrtc->event &&
+		    acrtc->event->event.base.type != DRM_EVENT_FLIP_COMPLETE) {
+			drm_send_event_locked(dev, &acrtc->event->base);
+			acrtc->event = NULL;
+		}
+	}
+	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+	/* TODO: Is it too early if the actual flip hasn't happened yet? */
+	/* Release old FB */
+	drm_atomic_helper_cleanup_planes(dev, state);
 }
 
 /*
  * This function handles all cases when set mode does not come upon hotplug.
@@ -2982,6 +2952,7 @@ int amdgpu_dm_atomic_check(struct drm_device *dev,
 		struct dm_connector_state *dm_state = NULL;
 		enum dm_commit_action action;
 		struct drm_crtc_state *crtc_state;
+		bool pflip_needed;
 
 		if (!fb || !crtc || crtc_set[i] != crtc ||
@@ -2995,10 +2966,12 @@ int amdgpu_dm_atomic_check(struct drm_device *dev,
 		 * 2. This commit is a page flip, and streams are created.
 		 */
 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
-		if (!page_flip_needed(plane_state, old_plane_state,
-				crtc_state->event, true) ||
-				action == DM_COMMIT_ACTION_DPMS_ON ||
-				action == DM_COMMIT_ACTION_SET) {
+		pflip_needed = !state->allow_modeset &&
+			page_flip_needed(plane_state, old_plane_state,
+					crtc_state->event, true);
+
+		if (!pflip_needed ||
+		    action == DM_COMMIT_ACTION_DPMS_ON ||
+		    action == DM_COMMIT_ACTION_SET) {
 			struct dc_surface *surface;
 
 			list_for_each_entry(connector,
......
@@ -52,10 +52,9 @@ void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder);
 
 int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
 
-int amdgpu_dm_atomic_commit(
-		struct drm_device *dev,
-		struct drm_atomic_state *state,
-		bool async);
+void amdgpu_dm_atomic_commit_tail(
+		struct drm_atomic_state *state);
 
 int amdgpu_dm_atomic_check(struct drm_device *dev,
 			   struct drm_atomic_state *state);
......