Commit 2d5bb791 authored by Wenjing Liu, committed by Alex Deucher

drm/amd/display: Implement update_planes_and_stream_v3 sequence

[WHY & HOW]
Update planes and stream version 3 separates FULL and FAST updates
into their own sequences. It aims to clean up frequent update type
checks that result in unnecessary branching in the logic flow. It also
adds a new commit minimal transition sequence, which detects the need
for a minimal transition based on an actual comparison of the current
and new states instead of "predicting" it based on per-feature software
policy, i.e. could_mpcc_tree_change_for_active_pipes.

The new commit minimal transition sequence is made universal to any
power saving features that would use extra free pipes such as Dynamic
ODM/MPC Combine, MPO or SubVp. Therefore there is no longer a need to
specially handle compatibility problems with transitions among those
features as they are now transparent to the new sequence.
Reviewed-by: Wenjing Liu <wenjing.liu@amd.com>
Acked-by: Alex Hung <alex.hung@amd.com>
Signed-off-by: Wenjing Liu <wenjing.liu@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6bb89d13
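
Editor's note: the detection rule described above can be condensed into the following self-contained C sketch. It is not part of the patch; all toy_* names and the one-pipe-per-commit rule are invented for illustration. A topology change is committed directly only when it is seamless; otherwise a minimal transition state is tried as an intermediate step, mirroring is_pipe_topology_transition_seamless_with_intermediate_step() in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a dc_state pipe topology. */
struct toy_state {
	int pipes_in_use;
};

/* Toy seamlessness rule: assume HW can absorb a change of at most one
 * pipe per commit. The real driver asks
 * dc->hwss.is_pipe_topology_transition_seamless() instead. */
static bool toy_seamless(const struct toy_state *from,
			 const struct toy_state *to)
{
	int delta = to->pipes_in_use - from->pipes_in_use;

	return delta >= -1 && delta <= 1;
}

/* Mirrors is_pipe_topology_transition_seamless_with_intermediate_step(). */
static bool toy_seamless_via(const struct toy_state *cur,
			     const struct toy_state *mid,
			     const struct toy_state *new)
{
	return toy_seamless(cur, mid) && toy_seamless(mid, new);
}

int main(void)
{
	struct toy_state cur = { .pipes_in_use = 3 };     /* e.g. ODM combine active */
	struct toy_state new = { .pipes_in_use = 1 };     /* power saving disabled */
	struct toy_state minimal = { .pipes_in_use = 2 }; /* minimal transition state */

	if (toy_seamless(&cur, &new))
		printf("commit directly\n");
	else if (toy_seamless_via(&cur, &minimal, &new))
		printf("commit via minimal transition state\n"); /* taken here */
	else
		printf("non-seamless topology update\n");
	return 0;
}
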
@@ -2118,7 +2118,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
return result;
}
static bool commit_minimal_transition_state_legacy(struct dc *dc,
static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context);
/**
@@ -2184,7 +2184,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
if (handle_exit_odm2to1)
res = commit_minimal_transition_state_legacy(dc, dc->current_state);
res = commit_minimal_transition_state(dc, dc->current_state);
context = dc_state_create_current_copy(dc);
if (!context)
@@ -3082,6 +3082,63 @@ static void restore_planes_and_stream_state(
*stream->out_transfer_func = scratch->out_transfer_func;
}
/**
* update_seamless_boot_flags() - Helper function for updating seamless boot flags
*
* @dc: Current DC state
* @context: New DC state to be programmed
* @surface_count: Number of surfaces that have been updated
* @stream: Corresponding stream to be updated in the current flip
*
* Updating seamless boot flags does not need to be part of the commit sequence. This
* helper function will update the seamless boot flags on each flip (if required)
* outside of the HW commit sequence (fast or slow).
*
* Return: void
*/
static void update_seamless_boot_flags(struct dc *dc,
struct dc_state *context,
int surface_count,
struct dc_stream_state *stream)
{
if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
* bandwidth. Important to note that it is expected UEFI will
* only light up a single display on POST, therefore we only expect
* one stream with seamless boot flag set.
*/
if (stream->apply_seamless_boot_optimization) {
stream->apply_seamless_boot_optimization = false;
if (get_seamless_boot_stream_count(context) == 0)
dc->optimized_required = true;
}
}
}
/**
* update_planes_and_stream_state() - The function takes planes and stream
* updates as inputs and determines the appropriate update type. If update type
* is FULL, the function allocates a new context, populates and validates it.
* Otherwise, it updates current dc context. The function will return both
* new_context and new_update_type back to the caller. The function also backs
* up both current and new contexts into corresponding dc state scratch memory.
* TODO: The function does too many things, and even conditionally allocates dc
context memory implicitly. We should consider breaking it down.
*
* @dc: Current DC state
* @srf_updates: an array of surface updates
* @surface_count: surface update count
* @stream: Corresponding stream to be updated
* @stream_update: stream update
* @new_update_type: [out] determined update type by the function
* @new_context: [out] new context allocated and validated if update type is
* FULL, reference to current context if update type is less than FULL.
*
* Return: true if a valid update is populated into new_context, false
* otherwise.
*/
static bool update_planes_and_stream_state(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -3202,6 +3259,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
resource_build_test_pattern_params(&context->res_ctx, otg_master);
}
}
update_seamless_boot_flags(dc, context, surface_count, stream);
*new_context = context;
*new_update_type = update_type;
@@ -4117,6 +4175,10 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
struct dc_state *minimal_transition_context = NULL;
unsigned int i, j;
minimal_transition_context = dc_state_create_copy(base_context);
if (!minimal_transition_context)
return NULL;
if (!dc->config.is_vmin_only_asic) {
policy->mpc_policy = dc->debug.pipe_split_policy;
dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
@@ -4126,10 +4188,6 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
policy->subvp_policy = dc->debug.force_disable_subvp;
dc->debug.force_disable_subvp = true;
minimal_transition_context = dc_state_create_copy(base_context);
if (!minimal_transition_context)
return NULL;
/* commit minimal state */
if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
for (i = 0; i < minimal_transition_context->stream_count; i++) {
@@ -4153,69 +4211,178 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
return minimal_transition_context;
}
static bool is_pipe_topology_transition_seamless_with_intermediate_step(
struct dc *dc,
struct dc_state *initial_state,
struct dc_state *intermediate_state,
struct dc_state *final_state)
{
return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state,
intermediate_state) &&
dc->hwss.is_pipe_topology_transition_seamless(dc,
intermediate_state, final_state);
}
static void swap_and_free_current_context(struct dc *dc,
struct dc_state *new_context, struct dc_stream_state *stream)
{
int i;
struct dc_state *old = dc->current_state;
struct pipe_ctx *pipe_ctx;
/* Since memory free requires elevated IRQL, an interrupt
* request is generated by mem free. If this happens
* between freeing and reassigning the context, our vsync
* interrupt will call into dc and cause a memory
* corruption. Hence, we first reassign the context,
* then free the old context.
*/
dc->current_state = new_context;
dc_state_release(old);
// clear any forced full updates
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe_ctx = &new_context->res_ctx.pipe_ctx[i];
if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
pipe_ctx->plane_state->force_full_update = false;
}
}
static int initialize_empty_surface_updates(
struct dc_stream_state *stream,
struct dc_surface_update *srf_updates)
{
struct dc_stream_status *status = dc_stream_get_status(stream);
int i;
for (i = 0; i < status->plane_count; i++)
srf_updates[i].surface = status->plane_states[i];
return status->plane_count;
}
static bool commit_minimal_transition_based_on_new_context(struct dc *dc,
struct dc_state *new_context,
struct dc_stream_state *stream,
struct dc_surface_update *srf_updates,
int surface_count)
{
bool success = false;
struct pipe_split_policy_backup policy;
struct dc_state *intermediate_context =
create_minimal_transition_state(dc, new_context,
&policy);
if (intermediate_context) {
if (is_pipe_topology_transition_seamless_with_intermediate_step(
dc,
dc->current_state,
intermediate_context,
new_context)) {
DC_LOG_DC("commit minimal transition state: base = new state\n");
commit_planes_for_stream(dc, srf_updates,
surface_count, stream, NULL,
UPDATE_TYPE_FULL, intermediate_context);
swap_and_free_current_context(
dc, intermediate_context, stream);
dc_state_retain(dc->current_state);
success = true;
}
release_minimal_transition_state(
dc, intermediate_context, &policy);
}
return success;
}
static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
struct dc_state *new_context, struct dc_stream_state *stream)
{
bool success = false;
struct pipe_split_policy_backup policy;
struct dc_state *intermediate_context;
struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
int surface_count;
/*
* Both current and new contexts share the same stream and plane state
* pointers. When the new context is validated, the stream and planes get
* populated with new updates such as new plane addresses. This makes
* the current context no longer valid because the stream and planes are
* modified from the original. We back up the current stream and plane
* states into scratch space whenever we populate a new context, so we
* can restore the original values by calling the restore function now.
* This restores the original stream and plane states associated with
* the current state.
*/
restore_planes_and_stream_state(&dc->current_state->scratch, stream);
intermediate_context = create_minimal_transition_state(dc,
dc->current_state, &policy);
if (intermediate_context) {
if (is_pipe_topology_transition_seamless_with_intermediate_step(
dc,
dc->current_state,
intermediate_context,
new_context)) {
DC_LOG_DC("commit minimal transition state: base = current state\n");
surface_count = initialize_empty_surface_updates(
stream, srf_updates);
commit_planes_for_stream(dc, srf_updates,
surface_count, stream, NULL,
UPDATE_TYPE_FULL, intermediate_context);
swap_and_free_current_context(
dc, intermediate_context, stream);
dc_state_retain(dc->current_state);
success = true;
}
release_minimal_transition_state(dc, intermediate_context,
&policy);
}
/*
* Restore stream and plane states back to the values associated with
* new context.
*/
restore_planes_and_stream_state(&new_context->scratch, stream);
return success;
}
/**
* commit_minimal_transition_state - Commit a minimal state based on current or new context
* commit_minimal_transition_state_in_dc_update - Commit a minimal state based
* on current or new context
*
* @dc: DC structure, used to get the current state
* @context: New context
* @stream: Stream getting the update for the flip
*
* The function takes in current state and new state and determine a minimal transition state
* as the intermediate step which could make the transition between current and new states
* seamless. If found, it will commit the minimal transition state and update current state to
* this minimal transition state and return true, if not, it will return false.
* The function takes in the current state and new state and determines a
* minimal transition state as the intermediate step which could make the
* transition between current and new states seamless. If found, it will
* commit the minimal transition state, update the current state to this
* minimal transition state and return true; if not, it will return false.
*
* Return:
* Return True if the minimal transition succeeded, false otherwise
*/
static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *context,
struct dc_stream_state *stream)
{
bool success = false;
struct dc_state *minimal_transition_context;
struct pipe_split_policy_backup policy;
/* commit based on new context */
minimal_transition_context = create_minimal_transition_state(dc,
context, &policy);
if (minimal_transition_context) {
if (dc->hwss.is_pipe_topology_transition_seamless(
dc, dc->current_state, minimal_transition_context) &&
dc->hwss.is_pipe_topology_transition_seamless(
dc, minimal_transition_context, context)) {
DC_LOG_DC("%s base = new state\n", __func__);
success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
}
release_minimal_transition_state(dc, minimal_transition_context, &policy);
}
if (!success) {
/* commit based on current context */
restore_planes_and_stream_state(&dc->current_state->scratch, stream);
minimal_transition_context = create_minimal_transition_state(dc,
dc->current_state, &policy);
if (minimal_transition_context) {
if (dc->hwss.is_pipe_topology_transition_seamless(
dc, dc->current_state, minimal_transition_context) &&
dc->hwss.is_pipe_topology_transition_seamless(
dc, minimal_transition_context, context)) {
DC_LOG_DC("%s base = current state\n", __func__);
success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
}
release_minimal_transition_state(dc, minimal_transition_context, &policy);
}
restore_planes_and_stream_state(&context->scratch, stream);
}
ASSERT(success);
static bool commit_minimal_transition_state_in_dc_update(struct dc *dc,
struct dc_state *new_context,
struct dc_stream_state *stream,
struct dc_surface_update *srf_updates,
int surface_count)
{
bool success = commit_minimal_transition_based_on_new_context(
dc, new_context, stream, srf_updates,
surface_count);
if (!success)
success = commit_minimal_transition_based_on_current_context(dc,
new_context, stream);
if (!success)
DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n");
return success;
}
/**
* commit_minimal_transition_state_legacy - Create a transition pipe split state
* commit_minimal_transition_state - Create a transition pipe split state
*
* @dc: Used to get the current state status
* @transition_base_context: New transition state
@@ -4232,7 +4399,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
* Return:
* Return false if something is wrong in the transition state.
*/
static bool commit_minimal_transition_state_legacy(struct dc *dc,
static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
struct dc_state *transition_context;
@@ -4316,41 +4483,6 @@ static bool commit_minimal_transition_state_legacy(struct dc *dc,
return true;
}
/**
* update_seamless_boot_flags() - Helper function for updating seamless boot flags
*
* @dc: Current DC state
* @context: New DC state to be programmed
* @surface_count: Number of surfaces that have been updated
* @stream: Corresponding stream to be updated in the current flip
*
* Updating seamless boot flags does not need to be part of the commit sequence. This
* helper function will update the seamless boot flags on each flip (if required)
* outside of the HW commit sequence (fast or slow).
*
* Return: void
*/
static void update_seamless_boot_flags(struct dc *dc,
struct dc_state *context,
int surface_count,
struct dc_stream_state *stream)
{
if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
* bandwidth. Important to note that it is expected UEFI will
* only light up a single display on POST, therefore we only expect
* one stream with seamless boot flag set.
*/
if (stream->apply_seamless_boot_optimization) {
stream->apply_seamless_boot_optimization = false;
if (get_seamless_boot_stream_count(context) == 0)
dc->optimized_required = true;
}
}
}
static void populate_fast_updates(struct dc_fast_update *fast_update,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -4470,123 +4602,9 @@ static bool fast_update_only(struct dc *dc,
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
bool dc_update_planes_and_stream(struct dc *dc,
static bool update_planes_and_stream_v1(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
struct dc_state *context;
enum surface_update_type update_type;
int i;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used, transitions can
* cause underflow. Apply stream configuration with minimal pipe
* split first to avoid unsupported transitions for active pipes.
*/
bool force_minimal_pipe_splitting = 0;
bool is_plane_addition = 0;
bool is_fast_update_only;
dc_exit_ips_for_hw_access(dc);
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
surface_count, stream_update, stream);
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
dc,
stream,
srf_updates,
surface_count,
&is_plane_addition);
/* on plane addition, minimal state is the current one */
if (force_minimal_pipe_splitting && is_plane_addition &&
!commit_minimal_transition_state_legacy(dc, dc->current_state))
return false;
if (!update_planes_and_stream_state(
dc,
srf_updates,
surface_count,
stream,
stream_update,
&update_type,
&context))
return false;
/* on plane removal, minimal state is the new one */
if (force_minimal_pipe_splitting && !is_plane_addition) {
if (!commit_minimal_transition_state_legacy(dc, context)) {
dc_state_release(context);
return false;
}
update_type = UPDATE_TYPE_FULL;
}
if (dc->hwss.is_pipe_topology_transition_seamless &&
!dc->hwss.is_pipe_topology_transition_seamless(
dc, dc->current_state, context)) {
commit_minimal_transition_state(dc,
context, stream);
}
update_seamless_boot_flags(dc, context, surface_count, stream);
if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
stream,
stream_update,
update_type,
context);
} else {
if (!stream_update &&
dc->hwss.is_pipe_topology_transition_seamless &&
!dc->hwss.is_pipe_topology_transition_seamless(
dc, dc->current_state, context)) {
DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
BREAK_TO_DEBUGGER();
}
commit_planes_for_stream(
dc,
srf_updates,
surface_count,
stream,
stream_update,
update_type,
context);
}
if (dc->current_state != context) {
/* Since memory free requires elevated IRQL, an interrupt
* request is generated by mem free. If this happens
* between freeing and reassigning the context, our vsync
* interrupt will call into dc and cause a memory
* corruption BSOD. Hence, we first reassign the context,
* then free the old context.
*/
struct dc_state *old = dc->current_state;
dc->current_state = context;
dc_state_release(old);
// clear any forced full updates
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
pipe_ctx->plane_state->force_full_update = false;
}
}
return true;
}
void dc_commit_updates_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
struct dc_state *state)
{
@@ -4606,35 +4624,13 @@ void dc_commit_updates_for_stream(struct dc *dc,
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
/* TODO: Since change commit sequence can have a huge impact,
* we decided to only enable it for DCN3x. However, as soon as
* we get more confident about this change we'll need to enable
* the new sequence for all ASICs.
*/
if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
/*
* Previous frame finished and HW is ready for optimization.
*/
if (update_type == UPDATE_TYPE_FAST)
dc_post_update_surfaces_to_stream(dc);
dc_update_planes_and_stream(dc, srf_updates,
surface_count, stream,
stream_update);
return;
}
if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
if (update_type >= UPDATE_TYPE_FULL) {
/* initialize scratch memory for building context */
context = dc_state_create_copy(state);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return;
return false;
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -4651,7 +4647,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
dc_post_update_surfaces_to_stream(dc);
}
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *surface = srf_updates[i].surface;
@@ -4676,13 +4671,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
DC_ERROR("Mode validation failed for stream update!\n");
dc_state_release(context);
return;
return false;
}
}
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
update_seamless_boot_flags(dc, context, surface_count, stream);
if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
!dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
@@ -4723,9 +4717,252 @@ void dc_commit_updates_for_stream(struct dc *dc,
dc_post_update_surfaces_to_stream(dc);
TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
}
return true;
}
static bool update_planes_and_stream_v2(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
struct dc_state *context;
enum surface_update_type update_type;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used, transitions can
* cause underflow. Apply stream configuration with minimal pipe
* split first to avoid unsupported transitions for active pipes.
*/
bool force_minimal_pipe_splitting = 0;
bool is_plane_addition = 0;
bool is_fast_update_only;
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
surface_count, stream_update, stream);
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
dc,
stream,
srf_updates,
surface_count,
&is_plane_addition);
/* on plane addition, minimal state is the current one */
if (force_minimal_pipe_splitting && is_plane_addition &&
!commit_minimal_transition_state(dc, dc->current_state))
return false;
if (!update_planes_and_stream_state(
dc,
srf_updates,
surface_count,
stream,
stream_update,
&update_type,
&context))
return false;
/* on plane removal, minimal state is the new one */
if (force_minimal_pipe_splitting && !is_plane_addition) {
if (!commit_minimal_transition_state(dc, context)) {
dc_state_release(context);
return false;
}
update_type = UPDATE_TYPE_FULL;
}
if (dc->hwss.is_pipe_topology_transition_seamless &&
!dc->hwss.is_pipe_topology_transition_seamless(
dc, dc->current_state, context))
commit_minimal_transition_state_in_dc_update(dc, context, stream,
srf_updates, surface_count);
if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
stream,
stream_update,
update_type,
context);
} else {
if (!stream_update &&
dc->hwss.is_pipe_topology_transition_seamless &&
!dc->hwss.is_pipe_topology_transition_seamless(
dc, dc->current_state, context)) {
DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
BREAK_TO_DEBUGGER();
}
commit_planes_for_stream(
dc,
srf_updates,
surface_count,
stream,
stream_update,
update_type,
context);
}
if (dc->current_state != context)
swap_and_free_current_context(dc, context, stream);
return true;
}
return;
static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
enum surface_update_type update_type)
{
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
ASSERT(update_type < UPDATE_TYPE_FULL);
populate_fast_updates(fast_update, srf_updates, surface_count,
stream_update);
if (fast_update_only(dc, fast_update, srf_updates, surface_count,
stream_update, stream) &&
!dc->debug.enable_legacy_fast_update)
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
stream,
stream_update,
update_type,
dc->current_state);
else
commit_planes_for_stream(
dc,
srf_updates,
surface_count,
stream,
stream_update,
update_type,
dc->current_state);
}
static void commit_planes_and_stream_update_with_new_context(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
enum surface_update_type update_type,
struct dc_state *new_context)
{
ASSERT(update_type >= UPDATE_TYPE_FULL);
if (!dc->hwss.is_pipe_topology_transition_seamless(dc,
dc->current_state, new_context))
/*
* It is required by the feature design that all pipe topologies
* using extra free pipes for power saving purposes such as
* dynamic ODM or SubVp shall only be enabled when they can be
* transitioned seamlessly to AND from their minimal transition
* state. A minimal transition state is defined as the same dc
* state but with all power saving features disabled. So it uses
* the minimum pipe topology. When we can't seamlessly
* transition from state A to state B, we will insert the
* minimal transition state A' or B' in between so seamless
* transition between A and B can be made possible.
*/
commit_minimal_transition_state_in_dc_update(dc, new_context,
stream, srf_updates, surface_count);
commit_planes_for_stream(
dc,
srf_updates,
surface_count,
stream,
stream_update,
update_type,
new_context);
}
static bool update_planes_and_stream_v3(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
struct dc_state *new_context;
enum surface_update_type update_type;
/*
* When this function returns true and new_context is not equal to
* current state, the function allocates and validates a new dc state
* and assigns it to new_context. The function expects that the caller
* is responsible for freeing this memory when new_context is no longer
* used. We swap the current context with the new one and free the current
* one instead, so new_context's memory will live until it is replaced by
* a newer context at the next full update. Refer to the use of
* swap_and_free_current_context below.
*/
if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
stream, stream_update, &update_type,
&new_context))
return false;
if (new_context == dc->current_state) {
commit_planes_and_stream_update_on_current_context(dc,
srf_updates, surface_count, stream,
stream_update, update_type);
} else {
commit_planes_and_stream_update_with_new_context(dc,
srf_updates, surface_count, stream,
stream_update, update_type, new_context);
swap_and_free_current_context(dc, new_context, stream);
}
return true;
}
bool dc_update_planes_and_stream(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
dc_exit_ips_for_hw_access(dc);
/*
* update planes and stream version 3 separates FULL and FAST updates
* into their own sequences. It aims to clean up frequent update type
* checks that result in unnecessary branching in the logic flow. It also
* adds a new commit minimal transition sequence, which detects the need
* for a minimal transition based on an actual comparison of the current
* and new states instead of "predicting" it based on per-feature software
* policy, i.e. could_mpcc_tree_change_for_active_pipes. The new commit
* minimal transition sequence is made universal to any power saving
* features that would use extra free pipes such as Dynamic ODM/MPC
* Combine, MPO or SubVp. Therefore there is no longer a need to
* specially handle compatibility problems with transitions among those
* features as they are now transparent to the new sequence.
*/
if (dc->ctx->dce_version > DCN_VERSION_3_51)
return update_planes_and_stream_v3(dc, srf_updates,
surface_count, stream, stream_update);
return update_planes_and_stream_v2(dc, srf_updates,
surface_count, stream, stream_update);
}
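
Editor's note: a hypothetical caller might look like the sketch below. It is not from this patch; example_flip_one_plane and its arguments are invented for illustration. It builds a one-entry surface update array the same way initialize_empty_surface_updates() does above and lets dc_update_planes_and_stream() pick the v2 or v3 sequence based on the ASIC.

/* Hypothetical caller sketch; assumes dc, stream and plane were obtained
 * elsewhere (e.g. by the DM layer). */
static bool example_flip_one_plane(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_plane_state *plane)
{
	struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};

	srf_updates[0].surface = plane;
	/* No stream-level changes in this update, so stream_update is NULL,
	 * just as the minimal transition commits above pass NULL. */
	return dc_update_planes_and_stream(dc, srf_updates, 1, stream, NULL);
}
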
void dc_commit_updates_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
struct dc_state *state)
{
dc_exit_ips_for_hw_access(dc);
/* TODO: Since change commit sequence can have a huge impact,
* we decided to only enable it for DCN3x. However, as soon as
* we get more confident about this change we'll need to enable
* the new sequence for all ASICs.
*/
if (dc->ctx->dce_version > DCN_VERSION_3_51) {
update_planes_and_stream_v3(dc, srf_updates, surface_count,
stream, stream_update);
return;
}
if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
update_planes_and_stream_v2(dc, srf_updates, surface_count,
stream, stream_update);
return;
}
update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
stream_update, state);
}
uint8_t dc_get_current_stream_count(struct dc *dc)
......