Commit c22fe762 authored by Dave Airlie

Merge tag 'drm-next-5.5-2019-11-15' of git://people.freedesktop.org/~agd5f/linux into drm-next

drm-next-5.5-2019-11-15:

amdgpu:
- Fix AVFS handling on SMU7 parts with custom power tables
- Enable Overdrive sysfs interface for Navi parts
- Fix power limit handling on smu11 parts
- Fix pcie link sysfs output for Navi
- Properly cancel MM worker threads on shutdown

radeon:
- Cleanup for ppc change

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191115163516.3714-1-alexander.deucher@amd.com
parents 17cc5139 622b2a0a
@@ -3110,6 +3110,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	DRM_INFO("amdgpu: finishing device.\n");
 	adev->shutdown = true;
+
+	flush_delayed_work(&adev->delayed_init_work);
+
 	/* disable all interrupts */
 	amdgpu_irq_disable_all(adev);
 	if (adev->mode_info.mode_config_initialized){
...
@@ -567,7 +567,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
 	struct ta_xgmi_shared_memory *xgmi_cmd;
 	int ret;
 
-	if (!psp->adev->psp.ta_fw)
+	if (!psp->adev->psp.ta_fw ||
+	    !psp->adev->psp.ta_xgmi_ucode_size ||
+	    !psp->adev->psp.ta_xgmi_start_addr)
 		return -ENOENT;
 
 	if (!psp->xgmi_context.initialized) {
@@ -777,6 +779,12 @@ static int psp_ras_initialize(struct psp_context *psp)
 {
 	int ret;
 
+	if (!psp->adev->psp.ta_ras_ucode_size ||
+	    !psp->adev->psp.ta_ras_start_addr) {
+		dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+		return 0;
+	}
+
 	if (!psp->ras.ras_initialized) {
 		ret = psp_ras_init_shared_buf(psp);
 		if (ret)
@@ -866,6 +874,12 @@ static int psp_hdcp_initialize(struct psp_context *psp)
 {
 	int ret;
 
+	if (!psp->adev->psp.ta_hdcp_ucode_size ||
+	    !psp->adev->psp.ta_hdcp_start_addr) {
+		dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
+		return 0;
+	}
+
 	if (!psp->hdcp_context.hdcp_initialized) {
 		ret = psp_hdcp_init_shared_buf(psp);
 		if (ret)
@@ -1039,6 +1053,12 @@ static int psp_dtm_initialize(struct psp_context *psp)
 {
 	int ret;
 
+	if (!psp->adev->psp.ta_dtm_ucode_size ||
+	    !psp->adev->psp.ta_dtm_start_addr) {
+		dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
+		return 0;
+	}
+
 	if (!psp->dtm_context.dtm_initialized) {
 		ret = psp_dtm_init_shared_buf(psp);
 		if (ret)
...
@@ -299,6 +299,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
 	int i, j;
 
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
 	drm_sched_entity_destroy(&adev->uvd.entity);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
...
@@ -216,6 +216,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
+	cancel_delayed_work_sync(&adev->vce.idle_work);
 	drm_sched_entity_destroy(&adev->vce.entity);
 
 	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
...
@@ -193,6 +193,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 {
 	int i, j;
 
+	cancel_delayed_work_sync(&adev->vcn.idle_work);
+
 	if (adev->vcn.indirect_sram) {
 		amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
 				      &adev->vcn.dpg_sram_gpu_addr,
...
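The three cancel_delayed_work_sync() additions above follow the same teardown rule: stop the idle worker before releasing anything it might still touch. A minimal sketch of that ordering, using hypothetical names rather than the driver's own structures:

/* Sketch only: the struct, field and function names below are
 * illustrative, not taken from amdgpu.
 */
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_engine {
	struct delayed_work idle_work;	/* periodically re-armed worker */
	void *ring_buffer;		/* resource the worker dereferences */
};

static void example_engine_fini(struct example_engine *e)
{
	/* Cancel the delayed work and wait for any running instance to
	 * finish; after this returns the worker can no longer execute.
	 */
	cancel_delayed_work_sync(&e->idle_work);

	/* Only now is it safe to free what the worker uses. */
	kfree(e->ring_buffer);
	e->ring_buffer = NULL;
}

Where the pending work still needs to run once rather than be dropped (as with delayed_init_work in the amdgpu_device_fini hunk earlier), flush_delayed_work() is used instead: it waits for the work to complete rather than cancelling it.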
@@ -1068,10 +1068,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 		return ret;
 
 	if (adev->asic_type != CHIP_ARCTURUS) {
-		ret = smu_override_pcie_parameters(smu);
-		if (ret)
-			return ret;
-
 		ret = smu_notify_display_change(smu);
 		if (ret)
 			return ret;
@@ -1100,6 +1096,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 			return ret;
 	}
 
+	if (adev->asic_type != CHIP_ARCTURUS) {
+		ret = smu_override_pcie_parameters(smu);
+		if (ret)
+			return ret;
+	}
+
 	ret = smu_set_default_od_settings(smu, initialize);
 	if (ret)
 		return ret;
@@ -1109,7 +1111,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 		if (ret)
 			return ret;
 
-		ret = smu_get_power_limit(smu, &smu->default_power_limit, true, false);
+		ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
 		if (ret)
 			return ret;
 	}
@@ -2511,3 +2513,13 @@ int smu_get_dpm_clock_table(struct smu_context *smu,
 
 	return ret;
 }
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
+{
+	uint32_t ret = 0;
+
+	if (smu->ppt_funcs->get_pptable_power_limit)
+		ret = smu->ppt_funcs->get_pptable_power_limit(smu);
+
+	return ret;
+}
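smu_get_pptable_power_limit() is a thin dispatcher into the per-ASIC pptable_funcs table: the callback is optional, and a missing implementation simply yields 0, which the power-limit code later treats as "fall back to the first limit read from hardware". A standalone illustration of that optional-callback convention, with hypothetical names and values:

#include <stdio.h>

/* Hypothetical ops table; a backend may leave the callback NULL. */
struct example_ppt_funcs {
	unsigned int (*get_pptable_power_limit)(void *ctx);
};

/* Dispatcher: 0 means "no table-provided limit", not an error. */
static unsigned int example_pptable_power_limit(void *ctx,
		const struct example_ppt_funcs *funcs)
{
	if (funcs->get_pptable_power_limit)
		return funcs->get_pptable_power_limit(ctx);
	return 0;
}

static unsigned int example_backend_limit(void *ctx)
{
	(void)ctx;
	return 280;	/* made-up socket power limit in watts */
}

int main(void)
{
	struct example_ppt_funcs with = { .get_pptable_power_limit = example_backend_limit };
	struct example_ppt_funcs without = { 0 };

	/* Prints "280 0": the second backend has no pptable limit. */
	printf("%u %u\n", example_pptable_power_limit(NULL, &with),
	       example_pptable_power_limit(NULL, &without));
	return 0;
}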
@@ -1261,15 +1261,14 @@ arcturus_get_profiling_clk_mask(struct smu_context *smu,
 static int arcturus_get_power_limit(struct smu_context *smu,
 				    uint32_t *limit,
-				    bool asic_default)
+				    bool cap)
 {
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
 	uint32_t asic_default_power_limit = 0;
 	int ret = 0;
 	int power_src;
 
-	if (!smu->default_power_limit ||
-	    !smu->power_limit) {
+	if (!smu->power_limit) {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
 			power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
 			if (power_src < 0)
@@ -1292,17 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
 				pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
 		}
 
-		if (smu->od_enabled) {
-			asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
-			asic_default_power_limit /= 100;
-		}
-
-		smu->default_power_limit = asic_default_power_limit;
 		smu->power_limit = asic_default_power_limit;
 	}
 
-	if (asic_default)
-		*limit = smu->default_power_limit;
+	if (cap)
+		*limit = smu_v11_0_get_max_power_limit(smu);
 	else
 		*limit = smu->power_limit;
@@ -2070,6 +2063,13 @@ static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
 	i2c_del_adapter(control);
 }
 
+static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
+{
+	PPTable_t *pptable = smu->smu_table.driver_pptable;
+
+	return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
 static const struct pptable_funcs arcturus_ppt_funcs = {
 	/* translate smu index into arcturus specific index */
 	.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2160,6 +2160,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
 	.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+	.get_pptable_power_limit = arcturus_get_pptable_power_limit,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
...
@@ -3969,6 +3969,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
 			"Failed to populate and upload SCLK MCLK DPM levels!",
 			result = tmp_result);
 
+	/*
+	 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
+	 * That effectively disables AVFS feature.
+	 */
+	if (hwmgr->hardcode_pp_table != NULL)
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
 	tmp_result = smu7_update_avfs(hwmgr);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to update avfs voltages!",
...
@@ -261,7 +261,6 @@ struct smu_table_context
 	struct smu_table		*tables;
 	struct smu_table		memory_pool;
 	uint8_t				thermal_controller_type;
-	uint16_t			TDPODLimit;
 
 	void				*overdrive_table;
 };
@@ -548,6 +547,7 @@ struct pptable_funcs {
 	int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
 	int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
 	int (*override_pcie_parameters)(struct smu_context *smu);
+	uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
 };
 
 int smu_load_microcode(struct smu_context *smu);
@@ -717,4 +717,6 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
 int smu_get_dpm_clock_table(struct smu_context *smu,
 			    struct dpm_clocks *clock_table);
 
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu);
+
 #endif
@@ -48,6 +48,8 @@
 
 #define SMU11_TOOL_SIZE			0x19000
 
+#define MAX_PCIE_CONF 2
+
 #define CLK_MAP(clk, index) \
 	[SMU_##clk] = {1, (index)}
@@ -88,6 +90,11 @@ struct smu_11_0_dpm_table {
 	uint32_t	max;	/* MHz */
 };
 
+struct smu_11_0_pcie_table {
+	uint8_t	pcie_gen[MAX_PCIE_CONF];
+	uint8_t	pcie_lane[MAX_PCIE_CONF];
+};
+
 struct smu_11_0_dpm_tables {
 	struct smu_11_0_dpm_table	soc_table;
 	struct smu_11_0_dpm_table	gfx_table;
@@ -100,6 +107,7 @@ struct smu_11_0_dpm_tables {
 	struct smu_11_0_dpm_table	display_table;
 	struct smu_11_0_dpm_table	phy_table;
 	struct smu_11_0_dpm_table	fclk_table;
+	struct smu_11_0_pcie_table	pcie_table;
 };
 
 struct smu_11_0_dpm_context {
@@ -250,4 +258,8 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 
 int smu_v11_0_override_pcie_parameters(struct smu_context *smu);
 
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size);
+
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
+
 #endif
@@ -141,7 +141,9 @@ struct smu_11_0_powerplay_table
 	struct smu_11_0_power_saving_clock_table	power_saving_clock;
 	struct smu_11_0_overdrive_table			overdrive_table;
 
+#ifndef SMU_11_0_PARTIAL_PPTABLE
 	PPTable_t smc_pptable;			//PPTable_t in smu11_driver_if.h
+#endif
 } __attribute__((packed));
 
 #endif
This diff is collapsed.
@@ -33,6 +33,11 @@
 #define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717)
 #define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK  (1448)
 
+#define NAVI10_VOLTAGE_SCALE (4)
+
+#define smnPCIE_LC_SPEED_CNTL			0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
+
 extern void navi10_set_ppt_funcs(struct smu_context *smu);
 
 #endif
@@ -24,6 +24,8 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 
+#define SMU_11_0_PARTIAL_PPTABLE
+
 #include "pp_debug.h"
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
@@ -31,6 +33,7 @@
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
 #include "smu_v11_0.h"
+#include "smu_v11_0_pptable.h"
 #include "soc15_common.h"
 #include "atom.h"
 #include "amd_pcie.h"
@@ -1045,13 +1048,44 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
 	return 0;
 }
 
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu) {
+	uint32_t od_limit, max_power_limit;
+	struct smu_11_0_powerplay_table *powerplay_table = NULL;
+	struct smu_table_context *table_context = &smu->smu_table;
+	powerplay_table = table_context->power_play_table;
+
+	max_power_limit = smu_get_pptable_power_limit(smu);
+
+	if (!max_power_limit) {
+		// If we couldn't get the table limit, fall back on first-read value
+		if (!smu->default_power_limit)
+			smu->default_power_limit = smu->power_limit;
+		max_power_limit = smu->default_power_limit;
+	}
+
+	if (smu->od_enabled) {
+		od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+
+		pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);
+
+		max_power_limit *= (100 + od_limit);
+		max_power_limit /= 100;
+	}
+
+	return max_power_limit;
+}
+
 int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
 {
 	int ret = 0;
+	uint32_t max_power_limit;
+
+	max_power_limit = smu_v11_0_get_max_power_limit(smu);
 
-	if (n > smu->default_power_limit) {
-		pr_err("New power limit is over the max allowed %d\n",
-		       smu->default_power_limit);
+	if (n > max_power_limit) {
+		pr_err("New power limit (%d) is over the max allowed %d\n",
+		       n,
+		       max_power_limit);
 		return -EINVAL;
 	}
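The new validation path caps user requests at the pptable limit scaled by the overdrive power percentage. A small standalone sketch of that arithmetic with made-up numbers (250 W board limit, +20% overdrive headroom), not the driver's own code:

#include <stdio.h>

/* Integer scaling as in the hunk above: limit * (100 + od%) / 100. */
static unsigned int example_max_power_limit(unsigned int pptable_limit_w,
					    unsigned int od_percent,
					    int od_enabled)
{
	unsigned int max = pptable_limit_w;

	if (od_enabled) {
		max *= 100 + od_percent;	/* 250 * 120 = 30000 */
		max /= 100;			/* 300 W */
	}
	return max;
}

int main(void)
{
	unsigned int requested = 320;				/* hypothetical sysfs write */
	unsigned int max = example_max_power_limit(250, 20, 1);	/* 300 */

	if (requested > max)
		printf("New power limit (%u) is over the max allowed %u\n",
		       requested, max);
	return 0;
}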
@@ -1779,3 +1813,30 @@ int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
 
 	return ret;
 }
+
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
+{
+	struct smu_table_context *table_context = &smu->smu_table;
+	int ret = 0;
+
+	if (initialize) {
+		if (table_context->overdrive_table) {
+			return -EINVAL;
+		}
+		table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
+		if (!table_context->overdrive_table) {
+			return -ENOMEM;
+		}
+		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
+		if (ret) {
+			pr_err("Failed to export overdrive table!\n");
+			return ret;
+		}
+	}
+	ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
+	if (ret) {
+		pr_err("Failed to import overdrive table!\n");
+		return ret;
+	}
+	return ret;
+}
@@ -466,7 +466,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
 	       sizeof(PPTable_t));
 
 	table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
-	table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
 
 	return 0;
 }
...
@@ -379,10 +379,6 @@ radeon_pci_remove(struct pci_dev *pdev)
 static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
-#ifdef CONFIG_PPC64
-	struct drm_device *ddev = pci_get_drvdata(pdev);
-#endif
-
 	/* if we are running in a VM, make sure the device
 	 * torn down properly on reboot/shutdown
 	 */
@@ -390,13 +386,14 @@ radeon_pci_shutdown(struct pci_dev *pdev)
 		radeon_pci_remove(pdev);
 
 #ifdef CONFIG_PPC64
-	/* Some adapters need to be suspended before a
+	/*
+	 * Some adapters need to be suspended before a
 	 * shutdown occurs in order to prevent an error
 	 * during kexec.
 	 * Make this power specific becauase it breaks
 	 * some non-power boards.
 	 */
-	radeon_suspend_kms(ddev, true, true, false);
+	radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
 #endif
 }
...