Commit e4f6b39e authored by Alex Deucher

drm/amdgpu: remove *_mc_access from display funcs

These are no longer needed now that we use the fb_location
programmed by the vbios.
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b3fba8ad
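In short, the stop_mc_access/resume_mc_access display hooks and struct amdgpu_mode_mc_save are deleted, and the per-ASIC GMC stop/resume helpers drop their save argument. A minimal sketch of the resulting call sequence, using the gmc_v6_0 names that appear in the hunks below (illustrative only, not the full soft-reset path):

/* Sketch: after this patch the MC stop/resume helpers take only the device,
 * since no display controller state has to be saved and restored around the
 * reset anymore. */
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev);
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev);

static int gmc_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_mc_stop(adev);		/* was gmc_v6_0_mc_stop(adev, &save) */
	/* ... SRBM soft-reset sequence elided ... */
	gmc_v6_0_mc_resume(adev);	/* was gmc_v6_0_mc_resume(adev, &save) */
	return 0;
}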
...@@ -613,7 +613,6 @@ struct amdgpu_mc {
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
uint32_t srbm_soft_reset;
struct amdgpu_mode_mc_save save;
bool prt_warning;
uint64_t stolen_size;
/* apertures */
...@@ -1910,8 +1909,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
...
...@@ -2851,12 +2851,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
r = amdgpu_suspend(adev);
retry:
/* Disable fb access */
if (adev->mode_info.num_crtc) {
struct amdgpu_mode_mc_save save;
amdgpu_display_stop_mc_access(adev, &save);
amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
}
if (adev->is_atom_fw)
amdgpu_atomfirmware_scratch_regs_save(adev);
else
...
...@@ -257,12 +257,6 @@ struct amdgpu_audio {
int num_pins;
};
struct amdgpu_mode_mc_save {
u32 vga_render_control;
u32 vga_hdp_control;
bool crtc_enabled[AMDGPU_MAX_CRTCS];
};
struct amdgpu_display_funcs {
/* display watermarks */
void (*bandwidth_update)(struct amdgpu_device *adev);
...@@ -298,10 +292,6 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
void (*stop_mc_access)(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save);
void (*resume_mc_access)(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save);
};
struct amdgpu_mode_info {
...
...@@ -484,134 +484,6 @@ static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
return true;
}
static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
u32 crtc_enabled, tmp;
int i;
save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
/* disable VGA render */
tmp = RREG32(mmVGA_RENDER_CONTROL);
tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
WREG32(mmVGA_RENDER_CONTROL, tmp);
/* blank the display controllers */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
CRTC_CONTROL, CRTC_MASTER_EN);
if (crtc_enabled) {
#if 0
u32 frame_count;
int j;
save->crtc_enabled[i] = true;
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
amdgpu_display_vblank_wait(adev, i);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = amdgpu_display_vblank_get_counter(adev, i);
for (j = 0; j < adev->usec_timeout; j++) {
if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
break;
udelay(1);
}
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
#else
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
save->crtc_enabled[i] = false;
/* ***** */
#endif
} else {
save->crtc_enabled[i] = false;
}
}
}
static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
u32 tmp, frame_count;
int i, j;
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
if (save->crtc_enabled[i]) {
tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) {
tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0);
WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
for (j = 0; j < adev->usec_timeout; j++) {
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
break;
udelay(1);
}
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
/* wait for the next frame */
frame_count = amdgpu_display_vblank_get_counter(adev, i);
for (j = 0; j < adev->usec_timeout; j++) {
if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
break;
udelay(1);
}
}
}
WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
/* Unlock vga access */
WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}
static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
...@@ -3751,8 +3623,6 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
.add_encoder = &dce_v10_0_encoder_add,
.add_connector = &amdgpu_connector_add,
.stop_mc_access = &dce_v10_0_stop_mc_access,
.resume_mc_access = &dce_v10_0_resume_mc_access,
};
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
...
...@@ -499,79 +499,6 @@ static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
return true;
}
static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
u32 crtc_enabled, tmp;
int i;
save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
/* disable VGA render */
tmp = RREG32(mmVGA_RENDER_CONTROL);
tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
WREG32(mmVGA_RENDER_CONTROL, tmp);
/* blank the display controllers */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
CRTC_CONTROL, CRTC_MASTER_EN);
if (crtc_enabled) {
#if 1
save->crtc_enabled[i] = true;
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
/*it is correct only for RGB ; black is 0*/
WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
}
#else
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
save->crtc_enabled[i] = false;
/* ***** */
#endif
} else {
save->crtc_enabled[i] = false;
}
}
}
static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
u32 tmp;
int i;
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
if (save->crtc_enabled[i]) {
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
}
}
WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
/* Unlock vga access */
WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}
static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
...@@ -3820,8 +3747,6 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
.add_encoder = &dce_v11_0_encoder_add,
.add_connector = &amdgpu_connector_add,
.stop_mc_access = &dce_v11_0_stop_mc_access,
.resume_mc_access = &dce_v11_0_resume_mc_access,
};
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
...
...@@ -392,117 +392,6 @@ static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
return mmDC_GPIO_HPD_A;
}
static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
{
if (crtc >= adev->mode_info.num_crtc)
return 0;
else
return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
u32 crtc_enabled, tmp, frame_count;
int i, j;
save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
/* disable VGA render */
WREG32(mmVGA_RENDER_CONTROL, 0);
/* blank the display controllers */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
if (crtc_enabled) {
save->crtc_enabled[i] = true;
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
dce_v6_0_vblank_wait(adev, i);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = evergreen_get_vblank_counter(adev, i);
for (j = 0; j < adev->usec_timeout; j++) {
if (evergreen_get_vblank_counter(adev, i) != frame_count)
break;
udelay(1);
}
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
save->crtc_enabled[i] = false;
/* ***** */
} else {
save->crtc_enabled[i] = false;
}
}
}
static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
u32 tmp;
int i, j;
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
}
WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
/* unlock regs and wait for update */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
if ((tmp & 0x7) != 0) {
tmp &= ~0x7;
WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
if (tmp & 1) {
tmp &= ~1;
WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
for (j = 0; j < adev->usec_timeout; j++) {
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
break;
udelay(1);
}
}
}
/* Unlock vga access */
WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
...@@ -3539,8 +3428,6 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
.add_encoder = &dce_v6_0_encoder_add,
.add_connector = &amdgpu_connector_add,
.stop_mc_access = &dce_v6_0_stop_mc_access,
.resume_mc_access = &dce_v6_0_resume_mc_access,
};
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
...
...@@ -419,81 +419,6 @@ static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
return true;
}
static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
u32 crtc_enabled, tmp;
int i;
save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
/* disable VGA render */
tmp = RREG32(mmVGA_RENDER_CONTROL);
tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
WREG32(mmVGA_RENDER_CONTROL, tmp);
/* blank the display controllers */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
CRTC_CONTROL, CRTC_MASTER_EN);
if (crtc_enabled) {
#if 1
save->crtc_enabled[i] = true;
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
/*it is correct only for RGB ; black is 0*/
WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
}
mdelay(20);
#else
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
save->crtc_enabled[i] = false;
/* ***** */
#endif
} else {
save->crtc_enabled[i] = false;
}
}
}
static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
u32 tmp;
int i;
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
if (save->crtc_enabled[i]) {
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
}
mdelay(20);
}
WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
/* Unlock vga access */
WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}
static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
...@@ -3588,8 +3513,6 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
.add_encoder = &dce_v8_0_encoder_add,
.add_connector = &amdgpu_connector_add,
.stop_mc_access = &dce_v8_0_stop_mc_access,
.resume_mc_access = &dce_v8_0_resume_mc_access,
};
static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
...
...@@ -95,56 +95,6 @@ static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
return 0;
}
static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
dce_v6_0_disable_dce(adev);
break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
dce_v8_0_disable_dce(adev);
break;
#endif
case CHIP_FIJI:
case CHIP_TONGA:
dce_v10_0_disable_dce(adev);
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
dce_v11_0_disable_dce(adev);
break;
case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_HAINAN:
#endif
/* no DCE */
return;
default:
DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
}
return;
}
static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
return;
}
/**
* dce_virtual_bandwidth_update - program display watermarks
*
...@@ -516,6 +466,45 @@ static int dce_virtual_sw_fini(void *handle)
static int dce_virtual_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
dce_v6_0_disable_dce(adev);
break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
dce_v8_0_disable_dce(adev);
break;
#endif
case CHIP_FIJI:
case CHIP_TONGA:
dce_v10_0_disable_dce(adev);
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
dce_v11_0_disable_dce(adev);
break;
case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_HAINAN:
#endif
/* no DCE */
break;
default:
DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
}
return 0;
}
...@@ -683,8 +672,6 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
.add_encoder = NULL,
.add_connector = NULL,
.stop_mc_access = &dce_virtual_stop_mc_access,
.resume_mc_access = &dce_virtual_resume_mc_access,
};
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
...
...@@ -66,14 +66,10 @@ static const u32 crtc_offsets[6] =
SI_CRTC5_REGISTER_OFFSET
};
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
if (adev->mode_info.num_crtc)
amdgpu_display_stop_mc_access(adev, save);
gmc_v6_0_wait_for_idle((void *)adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
...@@ -90,8 +86,7 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
}
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
{
u32 tmp;
...@@ -103,10 +98,6 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
WREG32(mmBIF_FB_EN, tmp);
if (adev->mode_info.num_crtc)
amdgpu_display_resume_mc_access(adev, save);
}
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
...@@ -975,7 +966,6 @@ static int gmc_v6_0_wait_for_idle(void *handle)
static int gmc_v6_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_mode_mc_save save;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
...@@ -991,7 +981,7 @@ static int gmc_v6_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
gmc_v6_0_mc_stop(adev, &save);
gmc_v6_0_mc_stop(adev);
if (gmc_v6_0_wait_for_idle(adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
...@@ -1011,7 +1001,7 @@ static int gmc_v6_0_soft_reset(void *handle)
udelay(50);
gmc_v6_0_mc_resume(adev, &save);
gmc_v6_0_mc_resume(adev);
udelay(50);
}
...
...@@ -76,14 +76,10 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
}
}
static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
if (adev->mode_info.num_crtc)
amdgpu_display_stop_mc_access(adev, save);
gmc_v7_0_wait_for_idle((void *)adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
...@@ -99,8 +95,7 @@ static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
udelay(100);
}
static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
{
u32 tmp;
...@@ -112,9 +107,6 @@ static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
WREG32(mmBIF_FB_EN, tmp);
if (adev->mode_info.num_crtc)
amdgpu_display_resume_mc_access(adev, save);
}
/**
...@@ -1128,7 +1120,6 @@ static int gmc_v7_0_wait_for_idle(void *handle)
static int gmc_v7_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_mode_mc_save save;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
...@@ -1144,7 +1135,7 @@ static int gmc_v7_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
gmc_v7_0_mc_stop(adev, &save);
gmc_v7_0_mc_stop(adev);
if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
...@@ -1165,7 +1156,7 @@ static int gmc_v7_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
gmc_v7_0_mc_resume(adev, &save);
gmc_v7_0_mc_resume(adev);
udelay(50);
}
...
...@@ -161,14 +161,10 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
}
}
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
if (adev->mode_info.num_crtc)
amdgpu_display_stop_mc_access(adev, save);
gmc_v8_0_wait_for_idle(adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
...@@ -184,8 +180,7 @@ static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
udelay(100);
}
static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
u32 tmp;
...@@ -197,9 +192,6 @@ static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
WREG32(mmBIF_FB_EN, tmp);
if (adev->mode_info.num_crtc)
amdgpu_display_resume_mc_access(adev, save);
}
/**
...@@ -1250,7 +1242,7 @@ static int gmc_v8_0_pre_soft_reset(void *handle)
if (!adev->mc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_stop(adev, &adev->mc.save);
gmc_v8_0_mc_stop(adev);
if (gmc_v8_0_wait_for_idle(adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
...@@ -1296,7 +1288,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
if (!adev->mc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_resume(adev, &adev->mc.save);
gmc_v8_0_mc_resume(adev);
return 0;
}
...