Commit 73b6f96c authored by Dave Airlie

Merge branch 'drm-fixes-4.20' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

Fixes for 4.20:
- DC MST fixes
- DC FBC fix
- Vega20 updates to support the latest vbios
- KFD type fixes for ioctl headers
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181108035551.2904-1-alexander.deucher@amd.com
parents d10cf6da 63237f87
@@ -151,6 +151,7 @@ extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
+extern uint amdgpu_dc_feature_mask;
 extern struct amdgpu_mgpu_info mgpu_info;
 #ifdef CONFIG_DRM_AMDGPU_SI
......
@@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1;
 int amdgpu_gpu_recovery = -1; /* auto */
 int amdgpu_emu_mode = 0;
 uint amdgpu_smu_memory_pool_size = 0;
+/* FBC (bit 0) disabled by default*/
+uint amdgpu_dc_feature_mask = 0;
+
 struct amdgpu_mgpu_info mgpu_info = {
 	.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
 };
@@ -631,6 +634,14 @@ module_param(halt_if_hws_hang, int, 0644);
 MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
 #endif
 
+/**
+ * DOC: dcfeaturemask (uint)
+ * Override display features enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable display features.
+ */
+MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
+module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_SI
 	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
......
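Editor's note (not part of the commit): since dcfeaturemask is registered with 0444 permissions it is read-only at runtime, so it has to be set at module load time, e.g. amdgpu.dcfeaturemask=0x1 on the kernel command line to set bit 0 (DC_FBC_MASK, defined in the amd_shared.h hunk further down) and re-enable FBC. A minimal userspace sketch for inspecting the active value, assuming amdgpu exposes the parameter under the usual sysfs path for module parameters:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0;
	FILE *f = fopen("/sys/module/amdgpu/parameters/dcfeaturemask", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &mask) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("dcfeaturemask = 0x%x, FBC %s\n", mask,
	       (mask & 0x1) ? "requested" : "disabled");	/* bit 0 == DC_FBC_MASK */
	return 0;
}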
@@ -49,6 +49,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
 		adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
 		adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
 		adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+		adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
 	}
 	return 0;
 }
......
@@ -429,6 +429,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	    adev->asic_type < CHIP_RAVEN)
 		init_data.flags.gpu_vm_support = true;
 
+	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
+		init_data.flags.fbc_support = true;
+
 	/* Display Core create. */
 	adev->dm.dc = dc_create(&init_data);
@@ -1524,13 +1527,6 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 {
 	struct amdgpu_display_manager *dm = bl_get_data(bd);
 
-	/*
-	 * PWM interperts 0 as 100% rather than 0% because of HW
-	 * limitation for level 0.So limiting minimum brightness level
-	 * to 1.
-	 */
-	if (bd->props.brightness < 1)
-		return 1;
-
 	if (dc_link_set_backlight_level(dm->backlight_link,
 			bd->props.brightness, 0, 0))
 		return 0;
@@ -2707,18 +2703,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	drm_connector = &aconnector->base;
 
 	if (!aconnector->dc_sink) {
-		/*
-		 * Create dc_sink when necessary to MST
-		 * Don't apply fake_sink to MST
-		 */
-		if (aconnector->mst_port) {
-			dm_dp_mst_dc_sink_create(drm_connector);
-			return stream;
+		if (!aconnector->mst_port) {
+			sink = create_fake_sink(aconnector);
+			if (!sink)
+				return stream;
 		}
-
-		sink = create_fake_sink(aconnector);
-		if (!sink)
-			return stream;
 	} else {
 		sink = aconnector->dc_sink;
 	}
@@ -3308,7 +3297,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane,
 static const struct drm_plane_funcs dm_plane_funcs = {
 	.update_plane = drm_atomic_helper_update_plane,
 	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = drm_plane_cleanup,
+	.destroy = drm_primary_helper_destroy,
 	.reset = dm_drm_plane_reset,
 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
 	.atomic_destroy_state = dm_drm_plane_destroy_state,
......
@@ -160,8 +160,6 @@ struct amdgpu_dm_connector {
 	struct mutex hpd_lock;
 
 	bool fake_enable;
-
-	bool mst_connected;
 };
 
 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
......
@@ -205,40 +205,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
 };
 
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
-{
-	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-	struct dc_sink *dc_sink;
-	struct dc_sink_init_data init_params = {
-			.link = aconnector->dc_link,
-			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
-
-	/* FIXME none of this is safe. we shouldn't touch aconnector here in
-	 * atomic_check
-	 */
-
-	/*
-	 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
-	 */
-	if (!aconnector->port || !aconnector->port->aux.ddc.algo)
-		return;
-
-	ASSERT(aconnector->edid);
-
-	dc_sink = dc_link_add_remote_sink(
-		aconnector->dc_link,
-		(uint8_t *)aconnector->edid,
-		(aconnector->edid->extensions + 1) * EDID_LENGTH,
-		&init_params);
-
-	dc_sink->priv = aconnector;
-	aconnector->dc_sink = dc_sink;
-
-	if (aconnector->dc_sink)
-		amdgpu_dm_update_freesync_caps(
-				connector, aconnector->edid);
-}
-
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
 {
 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -319,12 +285,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_encoder *amdgpu_encoder;
 	struct drm_encoder *encoder;
-	const struct drm_connector_helper_funcs *connector_funcs =
-		connector->base.helper_private;
-	struct drm_encoder *enc_master =
-		connector_funcs->best_encoder(&connector->base);
 
-	DRM_DEBUG_KMS("enc master is %p\n", enc_master);
 	amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
 	if (!amdgpu_encoder)
 		return NULL;
@@ -354,25 +315,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		aconnector = to_amdgpu_dm_connector(connector);
-		if (aconnector->mst_port == master
-				&& !aconnector->port) {
-			DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
-						aconnector, connector->base.id, aconnector->mst_port);
-
-			aconnector->port = port;
-			drm_connector_set_path_property(connector, pathprop);
-
-			drm_connector_list_iter_end(&conn_iter);
-			aconnector->mst_connected = true;
-			return &aconnector->base;
-		}
-	}
-	drm_connector_list_iter_end(&conn_iter);
 
 	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
 	if (!aconnector)
@@ -421,8 +363,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	 */
 	amdgpu_dm_connector_funcs_reset(connector);
 
-	aconnector->mst_connected = true;
-
 	DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
 			aconnector, connector->base.id, aconnector->mst_port);
@@ -434,6 +374,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 					struct drm_connector *connector)
 {
+	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+	struct drm_device *dev = master->base.dev;
+	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
 	DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
@@ -447,7 +390,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 		aconnector->dc_sink = NULL;
 	}
 
-	aconnector->mst_connected = false;
+	drm_connector_unregister(connector);
+	if (adev->mode_info.rfbdev)
+		drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
+	drm_connector_put(connector);
 }
 
 static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -458,18 +404,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 	drm_kms_helper_hotplug_event(dev);
 }
 
-static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
-{
-	mutex_lock(&connector->dev->mode_config.mutex);
-	drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
-	mutex_unlock(&connector->dev->mode_config.mutex);
-}
-
 static void dm_dp_mst_register_connector(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct amdgpu_device *adev = dev->dev_private;
-	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
 	if (adev->mode_info.rfbdev)
 		drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -477,9 +415,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
 		DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
 
 	drm_connector_register(connector);
-
-	if (aconnector->mst_connected)
-		dm_dp_mst_link_status_reset(connector);
 }
 
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
......
@@ -31,6 +31,5 @@ struct amdgpu_dm_connector;
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 				       struct amdgpu_dm_connector *aconnector);
 
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
 
 #endif
@@ -1722,7 +1722,7 @@ static void write_i2c_retimer_setting(
 		i2c_success = i2c_write(pipe_ctx, slave_address,
 				buffer, sizeof(buffer));
 		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
-			offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
 
 		if (!i2c_success)
 			/* Write failure */
@@ -1734,7 +1734,7 @@ static void write_i2c_retimer_setting(
 		i2c_success = i2c_write(pipe_ctx, slave_address,
 				buffer, sizeof(buffer));
 		RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
-			offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
 
 		if (!i2c_success)
 			/* Write failure */
......
@@ -169,6 +169,7 @@ struct link_training_settings;
 struct dc_config {
 	bool gpu_vm_support;
 	bool disable_disp_pll_sharing;
+	bool fbc_support;
 };
 
 enum visual_confirm {
......
@@ -1736,7 +1736,12 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
 	if (events->force_trigger)
 		value |= 0x1;
 
-	value |= 0x84;
+	if (num_pipes) {
+		struct dc *dc = pipe_ctx[0]->stream->ctx->dc;
+
+		if (dc->fbc_compressor)
+			value |= 0x84;
+	}
 
 	for (i = 0; i < num_pipes; i++)
 		pipe_ctx[i]->stream_res.tg->funcs->
......
@@ -1362,7 +1362,8 @@ static bool construct(
 		pool->base.sw_i2cs[i] = NULL;
 	}
 
-	dc->fbc_compressor = dce110_compressor_create(ctx);
+	if (dc->config.fbc_support)
+		dc->fbc_compressor = dce110_compressor_create(ctx);
 
 	if (!underlay_create(ctx, &pool->base))
 		goto res_create_fail;
......
@@ -133,6 +133,10 @@ enum PP_FEATURE_MASK {
 	PP_AVFS_MASK = 0x40000,
 };
 
+enum DC_FEATURE_MASK {
+	DC_FBC_MASK = 0x1,
+};
+
 /**
  * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
  */
......
@@ -1325,7 +1325,7 @@ struct atom_smu_info_v3_3 {
 	struct atom_common_table_header table_header;
 	uint8_t smuip_min_ver;
 	uint8_t smuip_max_ver;
-	uint8_t smu_rsd1;
+	uint8_t waflclk_ss_mode;
 	uint8_t gpuclk_ss_mode;
 	uint16_t sclk_ss_percentage;
 	uint16_t sclk_ss_rate_10hz;
@@ -1355,7 +1355,10 @@ struct atom_smu_info_v3_3 {
 	uint32_t syspll3_1_vco_freq_10khz;
 	uint32_t bootup_fclk_10khz;
 	uint32_t bootup_waflclk_10khz;
-	uint32_t reserved[3];
+	uint32_t smu_info_caps;
+	uint16_t waflclk_ss_percentage;    // in unit of 0.001%
+	uint16_t smuinitoffset;
+	uint32_t reserved;
 };
 
 /*
......
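Editor's note (not part of the commit): the atom_smu_info_v3_3 change only repurposes previously reserved space, so the table size and the offsets of earlier members are unchanged; three uint32_t words (12 bytes) become uint32_t + 2 x uint16_t + uint32_t (also 12 bytes). A standalone sketch of that invariant, using hypothetical mirror structs rather than the real header:

#include <stdint.h>

struct old_tail {			/* what used to sit at the end of atom_smu_info_v3_3 */
	uint32_t reserved[3];
};

struct new_tail {			/* the repurposed fields from this commit */
	uint32_t smu_info_caps;
	uint16_t waflclk_ss_percentage;	/* in unit of 0.001% */
	uint16_t smuinitoffset;
	uint32_t reserved;
};

_Static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
	       "repurposed reserved words must not change the vbios table layout");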
@@ -120,6 +120,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 	data->registry_data.disable_auto_wattman = 1;
 	data->registry_data.auto_wattman_debug = 0;
 	data->registry_data.auto_wattman_sample_period = 100;
+	data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
 	data->registry_data.auto_wattman_threshold = 50;
 	data->registry_data.gfxoff_controlled_by_driver = 1;
 	data->gfxoff_allowed = false;
@@ -829,6 +830,28 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
+{
+	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+
+	if (data->smu_features[GNLD_DPM_UCLK].enabled)
+		return smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetUclkFastSwitch,
+			1);
+
+	return 0;
+}
+
+static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
+{
+	struct vega20_hwmgr *data =
+			(struct vega20_hwmgr *)(hwmgr->backend);
+
+	return smum_send_msg_to_smc_with_parameter(hwmgr,
+			PPSMC_MSG_SetFclkGfxClkRatio,
+			data->registry_data.fclk_gfxclk_ratio);
+}
+
 static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
 {
 	struct vega20_hwmgr *data =
@@ -1532,6 +1555,16 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 			"[EnableDPMTasks] Failed to enable all smu features!",
 			return result);
 
+	result = vega20_notify_smc_display_change(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"[EnableDPMTasks] Failed to notify smc display change!",
+			return result);
+
+	result = vega20_send_clock_ratio(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"[EnableDPMTasks] Failed to send clock ratio!",
+			return result);
+
 	/* Initialize UVD/VCE powergating state */
 	vega20_init_powergate_state(hwmgr);
 
@@ -1972,19 +2005,6 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 	return ret;
 }
 
-static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr,
-		bool has_disp)
-{
-	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
-
-	if (data->smu_features[GNLD_DPM_UCLK].enabled)
-		return smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_SetUclkFastSwitch,
-			has_disp ? 1 : 0);
-
-	return 0;
-}
-
 int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
 		struct pp_display_clock_request *clock_req)
 {
@@ -2044,13 +2064,6 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
 	struct pp_display_clock_request clock_req;
 	int ret = 0;
 
-	if ((hwmgr->display_config->num_display > 1) &&
-	     !hwmgr->display_config->multi_monitor_in_sync &&
-	     !hwmgr->display_config->nb_pstate_switch_disable)
-		vega20_notify_smc_display_change(hwmgr, false);
-	else
-		vega20_notify_smc_display_change(hwmgr, true);
-
 	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
 	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
 	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
......
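Editor's note (not part of the commit): the default fclk_gfxclk_ratio value 0x3F6CCCCD appears to be a raw IEEE-754 single-precision bit pattern (about 0.925) passed to the SMU via PPSMC_MSG_SetFclkGfxClkRatio; the float interpretation is an assumption here, since the commit only forwards the value. A quick way to decode it:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint32_t raw = 0x3F6CCCCD;	/* default registry_data.fclk_gfxclk_ratio */
	float ratio;

	memcpy(&ratio, &raw, sizeof(ratio));	/* reinterpret the bits as a float */
	printf("0x%08X -> %f\n", raw, ratio);	/* prints roughly 0.925 */
	return 0;
}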
@@ -328,6 +328,7 @@ struct vega20_registry_data {
 	uint8_t   disable_auto_wattman;
 	uint32_t  auto_wattman_debug;
 	uint32_t  auto_wattman_sample_period;
+	uint32_t  fclk_gfxclk_ratio;
 	uint8_t   auto_wattman_threshold;
 	uint8_t   log_avfs_param;
 	uint8_t   enable_enginess;
......
@@ -105,7 +105,8 @@
 #define PPSMC_MSG_SetSystemVirtualDramAddrHigh   0x4B
 #define PPSMC_MSG_SetSystemVirtualDramAddrLow    0x4C
 #define PPSMC_MSG_WaflTest                       0x4D
-// Unused ID 0x4E to 0x50
+#define PPSMC_MSG_SetFclkGfxClkRatio             0x4E
+// Unused ID 0x4F to 0x50
 #define PPSMC_MSG_AllowGfxOff                    0x51
 #define PPSMC_MSG_DisallowGfxOff                 0x52
 #define PPSMC_MSG_GetPptLimit                    0x53
......
@@ -83,11 +83,11 @@ struct kfd_ioctl_set_cu_mask_args {
 };
 
 struct kfd_ioctl_get_queue_wave_state_args {
-	uint64_t ctl_stack_address;	/* to KFD */
-	uint32_t ctl_stack_used_size;	/* from KFD */
-	uint32_t save_area_used_size;	/* from KFD */
-	uint32_t queue_id;		/* to KFD */
-	uint32_t pad;
+	__u64 ctl_stack_address;	/* to KFD */
+	__u32 ctl_stack_used_size;	/* from KFD */
+	__u32 save_area_used_size;	/* from KFD */
+	__u32 queue_id;			/* to KFD */
+	__u32 pad;
 };
 
 /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -255,10 +255,10 @@ struct kfd_hsa_memory_exception_data {
 
 /* hw exception data */
 struct kfd_hsa_hw_exception_data {
-	uint32_t reset_type;
-	uint32_t reset_cause;
-	uint32_t memory_lost;
-	uint32_t gpu_id;
+	__u32 reset_type;
+	__u32 reset_cause;
+	__u32 memory_lost;
+	__u32 gpu_id;
 };
 
 /* Event data */
......
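Editor's note (not part of the commit): kfd_ioctl.h is a uapi header, so it must use the kernel's fixed-width __u32/__u64 types rather than uint32_t/uint64_t from userspace <stdint.h>; that is the point of the KFD type fixes above. A small mirror-struct sketch (the mirror type is hypothetical, only the layout claim comes from the diff):

#include <linux/types.h>

struct wave_state_args_mirror {		/* mirrors kfd_ioctl_get_queue_wave_state_args */
	__u64 ctl_stack_address;	/* to KFD */
	__u32 ctl_stack_used_size;	/* from KFD */
	__u32 save_area_used_size;	/* from KFD */
	__u32 queue_id;			/* to KFD */
	__u32 pad;
};

_Static_assert(sizeof(struct wave_state_args_mirror) == 24,
	       "8 + 4 * 4 bytes, independent of userspace stdint definitions");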