Commit 5f2c1192 authored by Roy Chan, committed by Alex Deucher

drm/amd/display: Support synchronized indirect reg access

[Why]
The indirect register index/data pair may be used by multiple threads.
When that happens, it causes register access issues that are hard to
trace.

[How]
Use the cgs service, which provides a synchronized indirect register
access api.
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
Signed-off-by: Roy Chan <roy.chan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 9cc37043
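For context, here is a rough sketch of what a synchronized indirect access does underneath. It is illustrative only and not from the patch: the device struct, lock name, and register offsets below are hypothetical, while the real path goes through the CGS index-register service (dm_write_index_reg / CGS_IND_REG__PCIE). The point is that the index write and the data access happen under one lock, so another thread cannot retarget the index in between.

/* Sketch only; "struct example_dev", the lock and the offsets are
 * made-up names for illustration. */
#include <linux/io.h>
#include <linux/spinlock.h>

struct example_dev {
	void __iomem *mmio;
	spinlock_t ind_idx_lock;	/* guards the shared index/data pair */
};

#define EXAMPLE_IND_INDEX	0x38	/* hypothetical index register offset */
#define EXAMPLE_IND_DATA	0x3c	/* hypothetical data register offset  */

static void example_ind_wreg_sync(struct example_dev *dev, u32 index, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->ind_idx_lock, flags);
	writel(index, dev->mmio + EXAMPLE_IND_INDEX);	/* select target register */
	writel(value, dev->mmio + EXAMPLE_IND_DATA);	/* write its value */
	spin_unlock_irqrestore(&dev->ind_idx_lock, flags);
}

Without the lock, a concurrent caller could rewrite the index register between the two writes, so the value would land in an unrelated register: the hard-to-trace corruption described above.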
@@ -588,6 +588,66 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
	return reg_val;
}
uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	dm_write_index_reg(ctx, CGS_IND_REG__PCIE, index, reg_val);
	va_end(ap);

	return reg_val;
}

uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	value = dm_read_index_reg(ctx, CGS_IND_REG__PCIE, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}
void reg_sequence_start_gather(const struct dc_context *ctx)
{
	/* if reg sequence is supported and enabled, set flag to
...
@@ -498,6 +498,40 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...);
/* indirect register access
 * underlying implementation determines which index/data pair to be used
 * in a synchronous way
 */
#define IX_REG_SET_N_SYNC(index, n, initial_val, ...)	\
		generic_indirect_reg_update_ex_sync(CTX,	\
				IND_REG(index), \
				initial_val, \
				n, __VA_ARGS__)

#define IX_REG_SET_2_SYNC(index, init_value, f1, v1, f2, v2)	\
		IX_REG_SET_N_SYNC(index, 2, init_value, \
				FN(reg, f1), v1,\
				FN(reg, f2), v2)

#define IX_REG_GET_N_SYNC(index, n, ...) \
		generic_indirect_reg_get_sync(CTX, \
				IND_REG(index), \
				n, __VA_ARGS__)

#define IX_REG_GET_SYNC(index, field, val) \
		IX_REG_GET_N_SYNC(index, 1, \
				FN(data_reg_name, field), val)

uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...);

uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...);
/* register offload macros
 *
 * instead of MMIO to register directly, in some cases we want
...
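As a usage sketch (not part of the patch), the new helpers can also be called directly with explicit shift/mask arguments, matching the prototypes above. EXAMPLE_IX_REG and the field layout below are hypothetical; in driver code the IX_REG_*_SYNC macros would normally supply the shift/mask pairs via FN().

/* Hypothetical caller; the register index and field layout are made up.
 * The *_sync helpers route the access through the CGS index/data service
 * so concurrent users of the pair do not interleave. */
static uint32_t example_sync_access(const struct dc_context *ctx)
{
	uint32_t field_a = 0;

	/* Update two fields (bit 0 and bits 4..7) on top of an initial value of 0. */
	generic_indirect_reg_update_ex_sync(ctx, EXAMPLE_IX_REG, 0, 2,
			0, 0x00000001, 1,	/* field A: shift 0, mask 0x00000001, value 1   */
			4, 0x000000f0, 0xa);	/* field B: shift 4, mask 0x000000f0, value 0xa */

	/* Read field A back. */
	generic_indirect_reg_get_sync(ctx, EXAMPLE_IX_REG, 1,
			0, 0x00000001, &field_a);

	return field_a;
}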