Commit 26bf62e4 authored by Dave Airlie's avatar Dave Airlie

Merge branch 'drm-radeon-next' of ../drm-radeon-next into drm-core-next

* 'drm-radeon-next' of ../drm-radeon-next:
  drm/radeon/kms: add drm blit support for evergreen
  drm/radeon: Modify radeon_pm_in_vbl to use radeon_get_crtc_scanoutpos()
  drm/radeon: Add function for display scanout position query.
  drm/radeon/kms: rework spread spectrum handling
  drm/radeon/kms: remove new pll algo
  drm/radeon/kms: remove some pll algo flags
  drm/radeon/kms: prefer high post dividers in legacy pll algo
  drm/radeon/kms: properly handle 40 bit MC addresses in the cursor code
  drm/radeon: add properties to configure the width of the underscan borders
  drm/radeon/kms/r6xx+: use new style fencing (v3)
  drm/radeon/kms: enable writeback (v2)
  drm/radeon/kms: clean up r6xx/r7xx blit init (v2)
parents 96a03fce d7ccd8fc
...@@ -65,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ ...@@ -65,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
......
This diff is collapsed.
...@@ -731,7 +731,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) ...@@ -731,7 +731,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */ /* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8); rb_bufsz = drm_order(rdev->cp.ring_size / 8);
tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT; tmp |= BUF_SWAP_32BIT;
#endif #endif
...@@ -745,8 +745,19 @@ int evergreen_cp_resume(struct radeon_device *rdev) ...@@ -745,8 +745,19 @@ int evergreen_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0); WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0); WREG32(CP_RB_WPTR, 0);
WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); /* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
if (rdev->wb.enabled)
WREG32(SCRATCH_UMSK, 0xff);
else {
tmp |= RB_NO_UPDATE;
WREG32(SCRATCH_UMSK, 0);
}
mdelay(1); mdelay(1);
WREG32(CP_RB_CNTL, tmp); WREG32(CP_RB_CNTL, tmp);
...@@ -1583,6 +1594,7 @@ int evergreen_irq_set(struct radeon_device *rdev) ...@@ -1583,6 +1594,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) { if (rdev->irq.sw_int) {
DRM_DEBUG("evergreen_irq_set: sw int\n"); DRM_DEBUG("evergreen_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE; cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
} }
if (rdev->irq.crtc_vblank_int[0]) { if (rdev->irq.crtc_vblank_int[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n"); DRM_DEBUG("evergreen_irq_set: vblank 0\n");
...@@ -1759,8 +1771,10 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) ...@@ -1759,8 +1771,10 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{ {
u32 wptr, tmp; u32 wptr, tmp;
/* XXX use writeback */ if (rdev->wb.enabled)
wptr = RREG32(IH_RB_WPTR); wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
else
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) { if (wptr & RB_OVERFLOW) {
/* When a ring buffer overflow happen start parsing interrupt /* When a ring buffer overflow happen start parsing interrupt
...@@ -1999,6 +2013,7 @@ int evergreen_irq_process(struct radeon_device *rdev) ...@@ -1999,6 +2013,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
break; break;
case 181: /* CP EOP event */ case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n"); DRM_DEBUG("IH: CP EOP\n");
radeon_fence_process(rdev);
break; break;
case 233: /* GUI IDLE */ case 233: /* GUI IDLE */
DRM_DEBUG("IH: CP EOP\n"); DRM_DEBUG("IH: CP EOP\n");
...@@ -2047,26 +2062,18 @@ static int evergreen_startup(struct radeon_device *rdev) ...@@ -2047,26 +2062,18 @@ static int evergreen_startup(struct radeon_device *rdev)
return r; return r;
} }
evergreen_gpu_init(rdev); evergreen_gpu_init(rdev);
#if 0
if (!rdev->r600_blit.shader_obj) {
r = r600_blit_init(rdev);
if (r) {
DRM_ERROR("radeon: failed blitter (%d).\n", r);
return r;
}
}
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); r = evergreen_blit_init(rdev);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_gpu_addr);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) { if (r) {
DRM_ERROR("failed to pin blit object %d\n", r); evergreen_blit_fini(rdev);
return r; rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
} }
#endif
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
return r;
/* Enable IRQ */ /* Enable IRQ */
r = r600_irq_init(rdev); r = r600_irq_init(rdev);
...@@ -2086,8 +2093,6 @@ static int evergreen_startup(struct radeon_device *rdev) ...@@ -2086,8 +2093,6 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_cp_resume(rdev); r = evergreen_cp_resume(rdev);
if (r) if (r)
return r; return r;
/* write back buffer are not vital so don't worry about failure */
r600_wb_enable(rdev);
return 0; return 0;
} }
...@@ -2121,23 +2126,43 @@ int evergreen_resume(struct radeon_device *rdev) ...@@ -2121,23 +2126,43 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev) int evergreen_suspend(struct radeon_device *rdev)
{ {
#if 0
int r; int r;
#endif
/* FIXME: we should wait for ring to be empty */ /* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev); r700_cp_stop(rdev);
rdev->cp.ready = false; rdev->cp.ready = false;
evergreen_irq_suspend(rdev); evergreen_irq_suspend(rdev);
r600_wb_disable(rdev); radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev); evergreen_pcie_gart_disable(rdev);
#if 0
/* unpin shaders bo */ /* unpin shaders bo */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (likely(r == 0)) { if (likely(r == 0)) {
radeon_bo_unpin(rdev->r600_blit.shader_obj); radeon_bo_unpin(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj);
} }
#endif
return 0;
}
int evergreen_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence)
{
int r;
mutex_lock(&rdev->r600_blit.mutex);
rdev->r600_blit.vb_ib = NULL;
r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
if (r) {
if (rdev->r600_blit.vb_ib)
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
evergreen_blit_done_copy(rdev, fence);
mutex_unlock(&rdev->r600_blit.mutex);
return 0; return 0;
} }
...@@ -2245,8 +2270,8 @@ int evergreen_init(struct radeon_device *rdev) ...@@ -2245,8 +2270,8 @@ int evergreen_init(struct radeon_device *rdev)
if (r) { if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n"); dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev); r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev); r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev); evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false; rdev->accel_working = false;
...@@ -2268,10 +2293,10 @@ int evergreen_init(struct radeon_device *rdev) ...@@ -2268,10 +2293,10 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev) void evergreen_fini(struct radeon_device *rdev)
{ {
/*r600_blit_fini(rdev);*/ evergreen_blit_fini(rdev);
r700_cp_fini(rdev); r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev); r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev); evergreen_pcie_gart_fini(rdev);
radeon_gem_fini(rdev); radeon_gem_fini(rdev);
......
This diff is collapsed.
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Alex Deucher <alexander.deucher@amd.com>
*/
#include <linux/types.h>
#include <linux/kernel.h>
/*
* evergreen cards need to use the 3D engine to blit data which requires
* quite a bit of hw state setup. Rather than pull the whole 3D driver
* (which normally generates the 3D state) into the DRM, we opt to use
 * statically generated state tables. The register state and shaders
* were hand generated to support blitting functionality. See the 3D
* driver or documentation for descriptions of the registers and
* shader instructions.
*/
/*
 * Default GPU state command stream for the Evergreen blit path.
 *
 * Per the note at the top of this file, this table was hand generated:
 * it is emitted verbatim to the ring to program the 3D-engine state the
 * blit shaders depend on.  Judging from the inline comments, entries are
 * grouped as a packet header dword (the 0xc0...00 values) followed by a
 * register offset and the value(s) written — see the hardware PM4 packet
 * documentation to decode them (layout inferred from the comments; not
 * re-verified here).  The dword values and their order are consumed
 * directly by the GPU, so do not reorder or reformat this table.
 */
const u32 evergreen_default_state[] =
{
0xc0012800, /* CONTEXT_CONTROL */
0x80000000,
0x80000000,
0xc0016900,
0x0000023b,
0x00000000, /* SQ_LDS_ALLOC_PS */
0xc0066900,
0x00000240,
0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xc0046900,
0x00000247,
0x00000000, /* SQ_GS_VERT_ITEMSIZE */
0x00000000,
0x00000000,
0x00000000,
0xc0026f00,
0x00000000,
0x00000000, /* SQ_VTX_BASE_VTX_LOC */
0x00000000,
0xc0026900,
0x00000010,
0x00000000, /* DB_Z_INFO */
0x00000000, /* DB_STENCIL_INFO */
0xc0016900,
0x00000200,
0x00000000, /* DB_DEPTH_CONTROL */
0xc0066900,
0x00000000,
0x00000060, /* DB_RENDER_CONTROL */
0x00000000, /* DB_COUNT_CONTROL */
0x00000000, /* DB_DEPTH_VIEW */
0x0000002a, /* DB_RENDER_OVERRIDE */
0x00000000, /* DB_RENDER_OVERRIDE2 */
0x00000000, /* DB_HTILE_DATA_BASE */
0xc0026900,
0x0000000a,
0x00000000, /* DB_STENCIL_CLEAR */
0x00000000, /* DB_DEPTH_CLEAR */
0xc0016900,
0x000002dc,
0x0000aa00, /* DB_ALPHA_TO_MASK */
0xc0016900,
0x00000080,
0x00000000, /* PA_SC_WINDOW_OFFSET */
0xc00d6900,
0x00000083,
0x0000ffff, /* PA_SC_CLIPRECT_RULE */
0x00000000, /* PA_SC_CLIPRECT_0_TL */
0x20002000, /* PA_SC_CLIPRECT_0_BR */
0x00000000,
0x20002000,
0x00000000,
0x20002000,
0x00000000,
0x20002000,
0xaaaaaaaa, /* PA_SC_EDGERULE */
0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
0x0000000f, /* CB_TARGET_MASK */
0x0000000f, /* CB_SHADER_MASK */
0xc0226900,
0x00000094,
0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x00000000, /* PA_SC_VPORT_ZMIN_0 */
0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
0xc0016900,
0x000000d4,
0x00000000, /* SX_MISC */
0xc0026900,
0x00000292,
0x00000000, /* PA_SC_MODE_CNTL_0 */
0x00000000, /* PA_SC_MODE_CNTL_1 */
0xc0106900,
0x00000300,
0x00000000, /* PA_SC_LINE_CNTL */
0x00000000, /* PA_SC_AA_CONFIG */
0x00000005, /* PA_SU_VTX_CNTL */
0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */
0xffffffff, /* PA_SC_AA_MASK */
0xc00d6900,
0x00000202,
0x00cc0010, /* CB_COLOR_CONTROL */
0x00000210, /* DB_SHADER_CONTROL */
0x00010000, /* PA_CL_CLIP_CNTL */
0x00000004, /* PA_SU_SC_MODE_CNTL */
0x00000100, /* PA_CL_VTE_CNTL */
0x00000000, /* PA_CL_VS_OUT_CNTL */
0x00000000, /* PA_CL_NANINF_CNTL */
0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */
0xc0066900,
0x000002de,
0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0xc0016900,
0x00000229,
0x00000000, /* SQ_PGM_START_FS */
0xc0016900,
0x0000022a,
0x00000000, /* SQ_PGM_RESOURCES_FS */
0xc0096900,
0x00000100,
0x00ffffff, /* VGT_MAX_VTX_INDX */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* SX_ALPHA_TEST_CONTROL */
0x00000000, /* CB_BLEND_RED */
0x00000000, /* CB_BLEND_GREEN */
0x00000000, /* CB_BLEND_BLUE */
0x00000000, /* CB_BLEND_ALPHA */
0xc0026900,
0x000002a8,
0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
0x00000000, /* */
0xc0026900,
0x000002ad,
0x00000000, /* VGT_REUSE_OFF */
0x00000000, /* */
0xc0116900,
0x00000280,
0x00000000, /* PA_SU_POINT_SIZE */
0x00000000, /* PA_SU_POINT_MINMAX */
0x00000008, /* PA_SU_LINE_CNTL */
0x00000000, /* PA_SC_LINE_STIPPLE */
0x00000000, /* VGT_OUTPUT_PATH_CNTL */
0x00000000, /* VGT_HOS_CNTL */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* VGT_GS_MODE */
0xc0016900,
0x000002a1,
0x00000000, /* VGT_PRIMITIVEID_EN */
0xc0016900,
0x000002a5,
0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
0xc0016900,
0x000002d5,
0x00000000, /* VGT_SHADER_STAGES_EN */
0xc0026900,
0x000002e5,
0x00000000, /* VGT_STRMOUT_CONFIG */
0x00000000, /* */
0xc0016900,
0x000001e0,
0x00000000, /* CB_BLEND0_CONTROL */
0xc0016900,
0x000001b1,
0x00000000, /* SPI_VS_OUT_CONFIG */
0xc0016900,
0x00000187,
0x00000000, /* SPI_VS_OUT_ID_0 */
0xc0016900,
0x00000191,
0x00000100, /* SPI_PS_INPUT_CNTL_0 */
0xc00b6900,
0x000001b3,
0x20000001, /* SPI_PS_IN_CONTROL_0 */
0x00000000, /* SPI_PS_IN_CONTROL_1 */
0x00000000, /* SPI_INTERP_CONTROL_0 */
0x00000000, /* SPI_INPUT_Z */
0x00000000, /* SPI_FOG_CNTL */
0x00100000, /* SPI_BARYC_CNTL */
0x00000000, /* SPI_PS_IN_CONTROL_2 */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0x00000000, /* */
0xc0036e00, /* SET_SAMPLER */
0x00000000,
0x00000012,
0x00000000,
0x00000000,
};
/*
 * Hand-generated vertex shader binary for the Evergreen blit path
 * (see the note at the top of this file).  The dwords are raw shader
 * machine code uploaded verbatim to the GPU; the values and their
 * order must not be changed.  Consult the hardware shader ISA
 * documentation to decode the instructions.
 */
const u32 evergreen_vs[] =
{
0x00000004,
0x80800400,
0x0000a03c,
0x95000688,
0x00004000,
0x15200688,
0x00000000,
0x00000000,
0x3c000000,
0x67961001,
0x00080000,
0x00000000,
0x1c000000,
0x67961000,
0x00000008,
0x00000000,
};
/*
 * Hand-generated pixel (fragment) shader binary for the Evergreen blit
 * path (see the note at the top of this file).  As with evergreen_vs,
 * these dwords are raw shader machine code uploaded verbatim to the
 * GPU; do not reorder or edit individual values.
 */
const u32 evergreen_ps[] =
{
0x00000003,
0xa00c0000,
0x00000008,
0x80400000,
0x00000000,
0x95200688,
0x00380400,
0x00146b10,
0x00380000,
0x20146b10,
0x00380400,
0x40146b00,
0x80380000,
0x60146b00,
0x00000000,
0x00000000,
0x00000010,
0x000d1000,
0xb0800000,
0x00000000,
};
/*
 * Element counts of the shader/state tables above, exported so callers
 * can consume the arrays without knowing their lengths.  Computed with
 * ARRAY_SIZE so they automatically track any table edits.
 */
const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
/*
* Copyright 2009 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#ifndef EVERGREEN_BLIT_SHADERS_H
#define EVERGREEN_BLIT_SHADERS_H

/*
 * Hand-generated Evergreen blit shader binaries and default 3D state
 * table, defined in evergreen_blit_shaders.c.  Each table is paired
 * with its dword count.
 */
extern const u32 evergreen_default_state[];
extern const u32 evergreen_default_size;

extern const u32 evergreen_vs[];
extern const u32 evergreen_vs_size;

extern const u32 evergreen_ps[];
extern const u32 evergreen_ps_size;

#endif
...@@ -802,6 +802,11 @@ ...@@ -802,6 +802,11 @@
#define SQ_ALU_CONST_CACHE_LS_14 0x28f78 #define SQ_ALU_CONST_CACHE_LS_14 0x28f78
#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c #define SQ_ALU_CONST_CACHE_LS_15 0x28f7c
#define PA_SC_SCREEN_SCISSOR_TL 0x28030
#define PA_SC_GENERIC_SCISSOR_TL 0x28240
#define PA_SC_WINDOW_SCISSOR_TL 0x28204
#define VGT_PRIMITIVE_TYPE 0x8958
#define DB_DEPTH_CONTROL 0x28800 #define DB_DEPTH_CONTROL 0x28800
#define DB_DEPTH_VIEW 0x28008 #define DB_DEPTH_VIEW 0x28008
#define DB_HTILE_DATA_BASE 0x28014 #define DB_HTILE_DATA_BASE 0x28014
......
...@@ -675,67 +675,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev, ...@@ -675,67 +675,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, RADEON_SW_INT_FIRE); radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
} }
int r100_wb_init(struct radeon_device *rdev)
{
int r;
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->wb.wb_obj);
if (r) {
dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
return r;
}
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->wb.gpu_addr);
if (r) {
dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
radeon_bo_unreserve(rdev->wb.wb_obj);
return r;
}
r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
return r;
}
}
WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
WREG32(R_00070C_CP_RB_RPTR_ADDR,
S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
WREG32(R_000770_SCRATCH_UMSK, 0xff);
return 0;
}
void r100_wb_disable(struct radeon_device *rdev)
{
WREG32(R_000770_SCRATCH_UMSK, 0);
}
void r100_wb_fini(struct radeon_device *rdev)
{
int r;
r100_wb_disable(rdev);
if (rdev->wb.wb_obj) {
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0)) {
dev_err(rdev->dev, "(%d) can't finish WB\n", r);
return;
}
radeon_bo_kunmap(rdev->wb.wb_obj);
radeon_bo_unpin(rdev->wb.wb_obj);
radeon_bo_unreserve(rdev->wb.wb_obj);
radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
}
int r100_copy_blit(struct radeon_device *rdev, int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t src_offset,
uint64_t dst_offset, uint64_t dst_offset,
...@@ -996,20 +935,32 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) ...@@ -996,20 +935,32 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
REG_SET(RADEON_RB_BLKSZ, rb_blksz) | REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
REG_SET(RADEON_MAX_FETCH, max_fetch) | REG_SET(RADEON_MAX_FETCH, max_fetch));
RADEON_RB_NO_UPDATE);
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
tmp |= RADEON_BUF_SWAP_32BIT; tmp |= RADEON_BUF_SWAP_32BIT;
#endif #endif
WREG32(RADEON_CP_RB_CNTL, tmp); WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
/* Set ring address */ /* Set ring address */
DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
/* Force read & write ptr to 0 */ /* Force read & write ptr to 0 */
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0); WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0); WREG32(RADEON_CP_RB_WPTR, 0);
/* set the wb address whether it's enabled or not */
WREG32(R_00070C_CP_RB_RPTR_ADDR,
S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
if (rdev->wb.enabled)
WREG32(R_000770_SCRATCH_UMSK, 0xff);
else {
tmp |= RADEON_RB_NO_UPDATE;
WREG32(R_000770_SCRATCH_UMSK, 0);
}
WREG32(RADEON_CP_RB_CNTL, tmp); WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10); udelay(10);
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
...@@ -1050,6 +1001,7 @@ void r100_cp_disable(struct radeon_device *rdev) ...@@ -1050,6 +1001,7 @@ void r100_cp_disable(struct radeon_device *rdev)
rdev->cp.ready = false; rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0); WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0); WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(R_000770_SCRATCH_UMSK, 0);
if (r100_gui_wait_for_idle(rdev)) { if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while " printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n"); "programming pipes. Bad things might happen.\n");
...@@ -3734,6 +3686,12 @@ static int r100_startup(struct radeon_device *rdev) ...@@ -3734,6 +3686,12 @@ static int r100_startup(struct radeon_device *rdev)
if (r) if (r)
return r; return r;
} }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
return r;
/* Enable IRQ */ /* Enable IRQ */
r100_irq_set(rdev); r100_irq_set(rdev);
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
...@@ -3743,9 +3701,6 @@ static int r100_startup(struct radeon_device *rdev) ...@@ -3743,9 +3701,6 @@ static int r100_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r); dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r; return r;
} }
r = r100_wb_init(rdev);
if (r)
dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev); r = r100_ib_init(rdev);
if (r) { if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r); dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
...@@ -3779,7 +3734,7 @@ int r100_resume(struct radeon_device *rdev) ...@@ -3779,7 +3734,7 @@ int r100_resume(struct radeon_device *rdev)
int r100_suspend(struct radeon_device *rdev) int r100_suspend(struct radeon_device *rdev)
{ {
r100_cp_disable(rdev); r100_cp_disable(rdev);
r100_wb_disable(rdev); radeon_wb_disable(rdev);
r100_irq_disable(rdev); r100_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCI) if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_disable(rdev); r100_pci_gart_disable(rdev);
...@@ -3789,7 +3744,7 @@ int r100_suspend(struct radeon_device *rdev) ...@@ -3789,7 +3744,7 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev) void r100_fini(struct radeon_device *rdev)
{ {
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
radeon_gem_fini(rdev); radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCI) if (rdev->flags & RADEON_IS_PCI)
...@@ -3902,7 +3857,7 @@ int r100_init(struct radeon_device *rdev) ...@@ -3902,7 +3857,7 @@ int r100_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */ /* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n"); dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCI) if (rdev->flags & RADEON_IS_PCI)
......
...@@ -1332,6 +1332,12 @@ static int r300_startup(struct radeon_device *rdev) ...@@ -1332,6 +1332,12 @@ static int r300_startup(struct radeon_device *rdev)
if (r) if (r)
return r; return r;
} }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
return r;
/* Enable IRQ */ /* Enable IRQ */
r100_irq_set(rdev); r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
...@@ -1341,9 +1347,6 @@ static int r300_startup(struct radeon_device *rdev) ...@@ -1341,9 +1347,6 @@ static int r300_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r); dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r; return r;
} }
r = r100_wb_init(rdev);
if (r)
dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev); r = r100_ib_init(rdev);
if (r) { if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r); dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
...@@ -1379,7 +1382,7 @@ int r300_resume(struct radeon_device *rdev) ...@@ -1379,7 +1382,7 @@ int r300_resume(struct radeon_device *rdev)
int r300_suspend(struct radeon_device *rdev) int r300_suspend(struct radeon_device *rdev)
{ {
r100_cp_disable(rdev); r100_cp_disable(rdev);
r100_wb_disable(rdev); radeon_wb_disable(rdev);
r100_irq_disable(rdev); r100_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCIE) if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev); rv370_pcie_gart_disable(rdev);
...@@ -1391,7 +1394,7 @@ int r300_suspend(struct radeon_device *rdev) ...@@ -1391,7 +1394,7 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev) void r300_fini(struct radeon_device *rdev)
{ {
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
radeon_gem_fini(rdev); radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE) if (rdev->flags & RADEON_IS_PCIE)
...@@ -1484,7 +1487,7 @@ int r300_init(struct radeon_device *rdev) ...@@ -1484,7 +1487,7 @@ int r300_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */ /* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n"); dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE) if (rdev->flags & RADEON_IS_PCIE)
......
...@@ -248,6 +248,12 @@ static int r420_startup(struct radeon_device *rdev) ...@@ -248,6 +248,12 @@ static int r420_startup(struct radeon_device *rdev)
return r; return r;
} }
r420_pipes_init(rdev); r420_pipes_init(rdev);
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
return r;
/* Enable IRQ */ /* Enable IRQ */
r100_irq_set(rdev); r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
...@@ -258,10 +264,6 @@ static int r420_startup(struct radeon_device *rdev) ...@@ -258,10 +264,6 @@ static int r420_startup(struct radeon_device *rdev)
return r; return r;
} }
r420_cp_errata_init(rdev); r420_cp_errata_init(rdev);
r = r100_wb_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
}
r = r100_ib_init(rdev); r = r100_ib_init(rdev);
if (r) { if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r); dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
...@@ -302,7 +304,7 @@ int r420_suspend(struct radeon_device *rdev) ...@@ -302,7 +304,7 @@ int r420_suspend(struct radeon_device *rdev)
{ {
r420_cp_errata_fini(rdev); r420_cp_errata_fini(rdev);
r100_cp_disable(rdev); r100_cp_disable(rdev);
r100_wb_disable(rdev); radeon_wb_disable(rdev);
r100_irq_disable(rdev); r100_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCIE) if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev); rv370_pcie_gart_disable(rdev);
...@@ -314,7 +316,7 @@ int r420_suspend(struct radeon_device *rdev) ...@@ -314,7 +316,7 @@ int r420_suspend(struct radeon_device *rdev)
void r420_fini(struct radeon_device *rdev) void r420_fini(struct radeon_device *rdev)
{ {
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
radeon_gem_fini(rdev); radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE) if (rdev->flags & RADEON_IS_PCIE)
...@@ -418,7 +420,7 @@ int r420_init(struct radeon_device *rdev) ...@@ -418,7 +420,7 @@ int r420_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */ /* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n"); dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE) if (rdev->flags & RADEON_IS_PCIE)
......
...@@ -181,6 +181,12 @@ static int r520_startup(struct radeon_device *rdev) ...@@ -181,6 +181,12 @@ static int r520_startup(struct radeon_device *rdev)
if (r) if (r)
return r; return r;
} }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
return r;
/* Enable IRQ */ /* Enable IRQ */
rs600_irq_set(rdev); rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
...@@ -190,9 +196,6 @@ static int r520_startup(struct radeon_device *rdev) ...@@ -190,9 +196,6 @@ static int r520_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r); dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r; return r;
} }
r = r100_wb_init(rdev);
if (r)
dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev); r = r100_ib_init(rdev);
if (r) { if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r); dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
...@@ -295,7 +298,7 @@ int r520_init(struct radeon_device *rdev) ...@@ -295,7 +298,7 @@ int r520_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */ /* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n"); dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev); rv370_pcie_gart_fini(rdev);
......
...@@ -1918,6 +1918,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) ...@@ -1918,6 +1918,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
void r600_cp_stop(struct radeon_device *rdev) void r600_cp_stop(struct radeon_device *rdev)
{ {
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
} }
int r600_init_microcode(struct radeon_device *rdev) int r600_init_microcode(struct radeon_device *rdev)
...@@ -2150,7 +2151,7 @@ int r600_cp_resume(struct radeon_device *rdev) ...@@ -2150,7 +2151,7 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */ /* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8); rb_bufsz = drm_order(rdev->cp.ring_size / 8);
tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT; tmp |= BUF_SWAP_32BIT;
#endif #endif
...@@ -2164,8 +2165,19 @@ int r600_cp_resume(struct radeon_device *rdev) ...@@ -2164,8 +2165,19 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0); WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0); WREG32(CP_RB_WPTR, 0);
WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); /* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
if (rdev->wb.enabled)
WREG32(SCRATCH_UMSK, 0xff);
else {
tmp |= RB_NO_UPDATE;
WREG32(SCRATCH_UMSK, 0);
}
mdelay(1); mdelay(1);
WREG32(CP_RB_CNTL, tmp); WREG32(CP_RB_CNTL, tmp);
...@@ -2217,9 +2229,10 @@ void r600_scratch_init(struct radeon_device *rdev) ...@@ -2217,9 +2229,10 @@ void r600_scratch_init(struct radeon_device *rdev)
int i; int i;
rdev->scratch.num_reg = 7; rdev->scratch.num_reg = 7;
rdev->scratch.reg_base = SCRATCH_REG0;
for (i = 0; i < rdev->scratch.num_reg; i++) { for (i = 0; i < rdev->scratch.num_reg; i++) {
rdev->scratch.free[i] = true; rdev->scratch.free[i] = true;
rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4); rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
} }
} }
...@@ -2263,88 +2276,34 @@ int r600_ring_test(struct radeon_device *rdev) ...@@ -2263,88 +2276,34 @@ int r600_ring_test(struct radeon_device *rdev)
return r; return r;
} }
void r600_wb_disable(struct radeon_device *rdev)
{
int r;
WREG32(SCRATCH_UMSK, 0);
if (rdev->wb.wb_obj) {
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0))
return;
radeon_bo_kunmap(rdev->wb.wb_obj);
radeon_bo_unpin(rdev->wb.wb_obj);
radeon_bo_unreserve(rdev->wb.wb_obj);
}
}
void r600_wb_fini(struct radeon_device *rdev)
{
r600_wb_disable(rdev);
if (rdev->wb.wb_obj) {
radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
}
int r600_wb_enable(struct radeon_device *rdev)
{
int r;
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
}
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0)) {
r600_wb_fini(rdev);
return r;
}
r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->wb.gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->wb.wb_obj);
dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
r600_wb_fini(rdev);
return r;
}
r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
r600_wb_fini(rdev);
return r;
}
}
WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
WREG32(SCRATCH_UMSK, 0xff);
return 0;
}
void r600_fence_ring_emit(struct radeon_device *rdev, void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence) struct radeon_fence *fence)
{ {
/* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */ if (rdev->wb.use_event) {
u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT); /* EVENT_WRITE_EOP - flush caches, send int */
/* wait for 3D idle clean */ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); radeon_ring_write(rdev, addr & 0xffffffff);
radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
/* Emit fence sequence & fire IRQ */ radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); } else {
radeon_ring_write(rdev, fence->seq); radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); /* wait for 3D idle clean */
radeon_ring_write(rdev, RB_INT_STAT); radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
}
} }
int r600_copy_blit(struct radeon_device *rdev, int r600_copy_blit(struct radeon_device *rdev,
...@@ -2426,19 +2385,12 @@ int r600_startup(struct radeon_device *rdev) ...@@ -2426,19 +2385,12 @@ int r600_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL; rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
} }
/* pin copy shader into vram */
if (rdev->r600_blit.shader_obj) { /* allocate wb buffer */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); r = radeon_wb_init(rdev);
if (unlikely(r != 0)) if (r)
return r; return r;
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_gpu_addr);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
}
/* Enable IRQ */ /* Enable IRQ */
r = r600_irq_init(rdev); r = r600_irq_init(rdev);
if (r) { if (r) {
...@@ -2457,8 +2409,7 @@ int r600_startup(struct radeon_device *rdev) ...@@ -2457,8 +2409,7 @@ int r600_startup(struct radeon_device *rdev)
r = r600_cp_resume(rdev); r = r600_cp_resume(rdev);
if (r) if (r)
return r; return r;
/* write back buffer are not vital so don't worry about failure */
r600_wb_enable(rdev);
return 0; return 0;
} }
...@@ -2517,7 +2468,7 @@ int r600_suspend(struct radeon_device *rdev) ...@@ -2517,7 +2468,7 @@ int r600_suspend(struct radeon_device *rdev)
r600_cp_stop(rdev); r600_cp_stop(rdev);
rdev->cp.ready = false; rdev->cp.ready = false;
r600_irq_suspend(rdev); r600_irq_suspend(rdev);
r600_wb_disable(rdev); radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev); r600_pcie_gart_disable(rdev);
/* unpin shaders bo */ /* unpin shaders bo */
if (rdev->r600_blit.shader_obj) { if (rdev->r600_blit.shader_obj) {
...@@ -2614,8 +2565,8 @@ int r600_init(struct radeon_device *rdev) ...@@ -2614,8 +2565,8 @@ int r600_init(struct radeon_device *rdev)
if (r) { if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n"); dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev); r600_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev); r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev); r600_pcie_gart_fini(rdev);
rdev->accel_working = false; rdev->accel_working = false;
...@@ -2645,8 +2596,8 @@ void r600_fini(struct radeon_device *rdev) ...@@ -2645,8 +2596,8 @@ void r600_fini(struct radeon_device *rdev)
r600_audio_fini(rdev); r600_audio_fini(rdev);
r600_blit_fini(rdev); r600_blit_fini(rdev);
r600_cp_fini(rdev); r600_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev); r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev); r600_pcie_gart_fini(rdev);
radeon_agp_fini(rdev); radeon_agp_fini(rdev);
...@@ -2981,10 +2932,13 @@ int r600_irq_init(struct radeon_device *rdev) ...@@ -2981,10 +2932,13 @@ int r600_irq_init(struct radeon_device *rdev)
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR | IH_WPTR_OVERFLOW_CLEAR |
(rb_bufsz << 1)); (rb_bufsz << 1));
/* WPTR writeback, not yet */
/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/ if (rdev->wb.enabled)
WREG32(IH_RB_WPTR_ADDR_LO, 0); ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
WREG32(IH_RB_WPTR_ADDR_HI, 0);
/* set the writeback address whether it's enabled or not */
WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
WREG32(IH_RB_CNTL, ih_rb_cntl); WREG32(IH_RB_CNTL, ih_rb_cntl);
...@@ -3068,6 +3022,7 @@ int r600_irq_set(struct radeon_device *rdev) ...@@ -3068,6 +3022,7 @@ int r600_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) { if (rdev->irq.sw_int) {
DRM_DEBUG("r600_irq_set: sw int\n"); DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE; cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
} }
if (rdev->irq.crtc_vblank_int[0]) { if (rdev->irq.crtc_vblank_int[0]) {
DRM_DEBUG("r600_irq_set: vblank 0\n"); DRM_DEBUG("r600_irq_set: vblank 0\n");
...@@ -3242,8 +3197,10 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) ...@@ -3242,8 +3197,10 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{ {
u32 wptr, tmp; u32 wptr, tmp;
/* XXX use writeback */ if (rdev->wb.enabled)
wptr = RREG32(IH_RB_WPTR); wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
else
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) { if (wptr & RB_OVERFLOW) {
/* When a ring buffer overflow happen start parsing interrupt /* When a ring buffer overflow happen start parsing interrupt
...@@ -3431,6 +3388,7 @@ int r600_irq_process(struct radeon_device *rdev) ...@@ -3431,6 +3388,7 @@ int r600_irq_process(struct radeon_device *rdev)
break; break;
case 181: /* CP EOP event */ case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n"); DRM_DEBUG("IH: CP EOP\n");
radeon_fence_process(rdev);
break; break;
case 233: /* GUI IDLE */ case 233: /* GUI IDLE */
DRM_DEBUG("IH: CP EOP\n"); DRM_DEBUG("IH: CP EOP\n");
......
...@@ -472,9 +472,10 @@ int r600_blit_init(struct radeon_device *rdev) ...@@ -472,9 +472,10 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16]; u32 packet2s[16];
int num_packet2s = 0; int num_packet2s = 0;
/* don't reinitialize blit */ /* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj) if (rdev->r600_blit.shader_obj)
return 0; goto done;
mutex_init(&rdev->r600_blit.mutex); mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0; rdev->r600_blit.state_offset = 0;
...@@ -532,6 +533,18 @@ int r600_blit_init(struct radeon_device *rdev) ...@@ -532,6 +533,18 @@ int r600_blit_init(struct radeon_device *rdev)
memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
radeon_bo_kunmap(rdev->r600_blit.shader_obj); radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj);
done:
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_gpu_addr);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
return 0; return 0;
} }
...@@ -552,7 +565,7 @@ void r600_blit_fini(struct radeon_device *rdev) ...@@ -552,7 +565,7 @@ void r600_blit_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->r600_blit.shader_obj); radeon_bo_unref(&rdev->r600_blit.shader_obj);
} }
int r600_vb_ib_get(struct radeon_device *rdev) static int r600_vb_ib_get(struct radeon_device *rdev)
{ {
int r; int r;
r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib); r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
...@@ -566,7 +579,7 @@ int r600_vb_ib_get(struct radeon_device *rdev) ...@@ -566,7 +579,7 @@ int r600_vb_ib_get(struct radeon_device *rdev)
return 0; return 0;
} }
void r600_vb_ib_put(struct radeon_device *rdev) static void r600_vb_ib_put(struct radeon_device *rdev)
{ {
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
...@@ -670,17 +683,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, ...@@ -670,17 +683,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
WARN_ON(1); WARN_ON(1);
#if 0
r600_vb_ib_put(rdev);
r600_nomm_put_vb(dev);
r600_nomm_get_vb(dev);
if (!dev_priv->blit_vb)
return;
set_shaders(dev);
vb = r600_nomm_get_vb_ptr(dev);
#endif
} }
vb[0] = i2f(dst_x); vb[0] = i2f(dst_x);
...@@ -765,17 +767,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, ...@@ -765,17 +767,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
WARN_ON(1); WARN_ON(1);
} }
#if 0
if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) {
r600_nomm_put_vb(dev);
r600_nomm_get_vb(dev);
if (!rdev->blit_vb)
return;
set_shaders(dev);
vb = r600_nomm_get_vb_ptr(dev);
}
#endif
vb[0] = i2f(dst_x / 4); vb[0] = i2f(dst_x / 4);
vb[1] = 0; vb[1] = 0;
......
...@@ -474,6 +474,7 @@ ...@@ -474,6 +474,7 @@
#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 #define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
#define VTX_REUSE_DEPTH_MASK 0x000000FF #define VTX_REUSE_DEPTH_MASK 0x000000FF
#define VGT_EVENT_INITIATOR 0x28a90 #define VGT_EVENT_INITIATOR 0x28a90
# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) # define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
#define VM_CONTEXT0_CNTL 0x1410 #define VM_CONTEXT0_CNTL 0x1410
...@@ -775,7 +776,27 @@ ...@@ -775,7 +776,27 @@
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45 #define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46 #define PACKET3_EVENT_WRITE 0x46
#define EVENT_TYPE(x) ((x) << 0)
#define EVENT_INDEX(x) ((x) << 8)
/* 0 - any non-TS event
* 1 - ZPASS_DONE
* 2 - SAMPLE_PIPELINESTAT
* 3 - SAMPLE_STREAMOUTSTAT*
* 4 - *S_PARTIAL_FLUSH
* 5 - TS events
*/
#define PACKET3_EVENT_WRITE_EOP 0x47 #define PACKET3_EVENT_WRITE_EOP 0x47
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
* 2 - send 64bit data
* 3 - send 64bit counter value
*/
#define INT_SEL(x) ((x) << 24)
/* 0 - none
* 1 - interrupt only (DATA_SEL = 0)
* 2 - interrupt when data write is confirmed
*/
#define PACKET3_ONE_REG_WRITE 0x57 #define PACKET3_ONE_REG_WRITE 0x57
#define PACKET3_SET_CONFIG_REG 0x68 #define PACKET3_SET_CONFIG_REG 0x68
#define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000 #define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000
......
...@@ -88,7 +88,6 @@ extern int radeon_benchmarking; ...@@ -88,7 +88,6 @@ extern int radeon_benchmarking;
extern int radeon_testing; extern int radeon_testing;
extern int radeon_connector_table; extern int radeon_connector_table;
extern int radeon_tv; extern int radeon_tv;
extern int radeon_new_pll;
extern int radeon_audio; extern int radeon_audio;
extern int radeon_disp_priority; extern int radeon_disp_priority;
extern int radeon_hw_i2c; extern int radeon_hw_i2c;
...@@ -365,6 +364,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev); ...@@ -365,6 +364,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev);
*/ */
struct radeon_scratch { struct radeon_scratch {
unsigned num_reg; unsigned num_reg;
uint32_t reg_base;
bool free[32]; bool free[32];
uint32_t reg[32]; uint32_t reg[32];
}; };
...@@ -593,8 +593,15 @@ struct radeon_wb { ...@@ -593,8 +593,15 @@ struct radeon_wb {
struct radeon_bo *wb_obj; struct radeon_bo *wb_obj;
volatile uint32_t *wb; volatile uint32_t *wb;
uint64_t gpu_addr; uint64_t gpu_addr;
bool enabled;
bool use_event;
}; };
#define RADEON_WB_SCRATCH_OFFSET 0
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define R600_WB_IH_WPTR_OFFSET 2048
#define R600_WB_EVENT_OFFSET 3072
/** /**
* struct radeon_pm - power management datas * struct radeon_pm - power management datas
* @max_bandwidth: maximum bandwidth the gpu has (MByte/s) * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
...@@ -1123,6 +1130,12 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) ...@@ -1123,6 +1130,12 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
void r600_kms_blit_copy(struct radeon_device *rdev, void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr, u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes); int size_bytes);
/* evergreen blit */
int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
void evergreen_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes);
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{ {
...@@ -1340,6 +1353,9 @@ extern void radeon_update_bandwidth_info(struct radeon_device *rdev); ...@@ -1340,6 +1353,9 @@ extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev); extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev); extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev); extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_wb_fini(struct radeon_device *rdev);
extern int radeon_wb_init(struct radeon_device *rdev);
extern void radeon_wb_disable(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev); extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
...@@ -1424,9 +1440,6 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev); ...@@ -1424,9 +1440,6 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev);
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int r600_ib_test(struct radeon_device *rdev); extern int r600_ib_test(struct radeon_device *rdev);
extern int r600_ring_test(struct radeon_device *rdev); extern int r600_ring_test(struct radeon_device *rdev);
extern void r600_wb_fini(struct radeon_device *rdev);
extern int r600_wb_enable(struct radeon_device *rdev);
extern void r600_wb_disable(struct radeon_device *rdev);
extern void r600_scratch_init(struct radeon_device *rdev); extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev); extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev); extern void r600_blit_fini(struct radeon_device *rdev);
...@@ -1464,6 +1477,8 @@ extern void r700_cp_stop(struct radeon_device *rdev); ...@@ -1464,6 +1477,8 @@ extern void r700_cp_stop(struct radeon_device *rdev);
extern void r700_cp_fini(struct radeon_device *rdev); extern void r700_cp_fini(struct radeon_device *rdev);
extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
extern int evergreen_irq_set(struct radeon_device *rdev); extern int evergreen_irq_set(struct radeon_device *rdev);
extern int evergreen_blit_init(struct radeon_device *rdev);
extern void evergreen_blit_fini(struct radeon_device *rdev);
/* radeon_acpi.c */ /* radeon_acpi.c */
#if defined(CONFIG_ACPI) #if defined(CONFIG_ACPI)
......
...@@ -726,9 +726,9 @@ static struct radeon_asic evergreen_asic = { ...@@ -726,9 +726,9 @@ static struct radeon_asic evergreen_asic = {
.get_vblank_counter = &evergreen_get_vblank_counter, .get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit, .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse, .cs_parse = &evergreen_cs_parse,
.copy_blit = NULL, .copy_blit = &evergreen_copy_blit,
.copy_dma = NULL, .copy_dma = &evergreen_copy_blit,
.copy = NULL, .copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock, .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock, .get_memory_clock = &radeon_atom_get_memory_clock,
......
...@@ -108,9 +108,6 @@ void r100_irq_disable(struct radeon_device *rdev); ...@@ -108,9 +108,6 @@ void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_vram_init_sizes(struct radeon_device *rdev); void r100_vram_init_sizes(struct radeon_device *rdev);
void r100_wb_disable(struct radeon_device *rdev);
void r100_wb_fini(struct radeon_device *rdev);
int r100_wb_init(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev); int r100_cp_reset(struct radeon_device *rdev);
void r100_vga_render_disable(struct radeon_device *rdev); void r100_vga_render_disable(struct radeon_device *rdev);
void r100_restore_sanity(struct radeon_device *rdev); void r100_restore_sanity(struct radeon_device *rdev);
...@@ -257,11 +254,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); ...@@ -257,11 +254,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p); int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev, void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence); struct radeon_fence *fence);
int r600_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
int r600_irq_process(struct radeon_device *rdev); int r600_irq_process(struct radeon_device *rdev);
int r600_irq_set(struct radeon_device *rdev); int r600_irq_set(struct radeon_device *rdev);
bool r600_gpu_is_lockup(struct radeon_device *rdev); bool r600_gpu_is_lockup(struct radeon_device *rdev);
...@@ -307,6 +299,9 @@ int evergreen_resume(struct radeon_device *rdev); ...@@ -307,6 +299,9 @@ int evergreen_resume(struct radeon_device *rdev);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev); bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
int evergreen_asic_reset(struct radeon_device *rdev); int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev); void evergreen_bandwidth_update(struct radeon_device *rdev);
int evergreen_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
void evergreen_hpd_init(struct radeon_device *rdev); void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev); void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
......
...@@ -1112,8 +1112,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) ...@@ -1112,8 +1112,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
* pre-DCE 3.0 r6xx hardware. This might need to be adjusted per * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
* family. * family.
*/ */
if (!radeon_new_pll) p1pll->pll_out_min = 64800;
p1pll->pll_out_min = 64800;
} }
p1pll->pll_in_min = p1pll->pll_in_min =
...@@ -1277,36 +1276,27 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, ...@@ -1277,36 +1276,27 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
return false; return false;
} }
static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
radeon_encoder struct radeon_atom_ss *ss,
*encoder, int id)
int id)
{ {
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_mode_info *mode_info = &rdev->mode_info; struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
uint16_t data_offset; uint16_t data_offset, size;
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
uint8_t frev, crev; uint8_t frev, crev;
struct radeon_atom_ss *ss = NULL; int i, num_indices;
int i;
if (id > ATOM_MAX_SS_ENTRY)
return NULL;
if (atom_parse_data_header(mode_info->atom_context, index, NULL, memset(ss, 0, sizeof(struct radeon_atom_ss));
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) { &frev, &crev, &data_offset)) {
ss_info = ss_info =
(struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
ss = num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
if (!ss)
return NULL;
for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) { for (i = 0; i < num_indices; i++) {
if (ss_info->asSS_Info[i].ucSS_Id == id) { if (ss_info->asSS_Info[i].ucSS_Id == id) {
ss->percentage = ss->percentage =
le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
...@@ -1315,11 +1305,88 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct ...@@ -1315,11 +1305,88 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
ss->delay = ss_info->asSS_Info[i].ucSS_Delay; ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
ss->range = ss_info->asSS_Info[i].ucSS_Range; ss->range = ss_info->asSS_Info[i].ucSS_Range;
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
break; return true;
}
}
}
return false;
}
union asic_ss_info {
struct _ATOM_ASIC_INTERNAL_SS_INFO info;
struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};
bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
uint16_t data_offset, size;
union asic_ss_info *ss_info;
uint8_t frev, crev;
int i, num_indices;
memset(ss, 0, sizeof(struct radeon_atom_ss));
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
ss_info =
(union asic_ss_info *)(mode_info->atom_context->bios + data_offset);
switch (frev) {
case 1:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT);
for (i = 0; i < num_indices; i++) {
if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
(clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) {
ss->percentage =
le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
return true;
}
}
break;
case 2:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
for (i = 0; i < num_indices; i++) {
if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
(clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) {
ss->percentage =
le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
return true;
}
} }
break;
case 3:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
for (i = 0; i < num_indices; i++) {
if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
(clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) {
ss->percentage =
le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
return true;
}
}
break;
default:
DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
break;
} }
} }
return ss; return false;
} }
union lvds_info { union lvds_info {
...@@ -1371,7 +1438,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct ...@@ -1371,7 +1438,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay = lvds->panel_pwr_delay =
le16_to_cpu(lvds_info->info.usOffDelayInMs); le16_to_cpu(lvds_info->info.usOffDelayInMs);
lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;
misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
if (misc & ATOM_VSYNC_POLARITY) if (misc & ATOM_VSYNC_POLARITY)
...@@ -1388,19 +1455,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct ...@@ -1388,19 +1455,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
/* set crtc values */ /* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id); lvds->lcd_ss_id = lvds_info->info.ucSS_Id;
if (ASIC_IS_AVIVO(rdev)) {
if (radeon_new_pll == 0)
lvds->pll_algo = PLL_ALGO_LEGACY;
else
lvds->pll_algo = PLL_ALGO_NEW;
} else {
if (radeon_new_pll == 1)
lvds->pll_algo = PLL_ALGO_NEW;
else
lvds->pll_algo = PLL_ALGO_LEGACY;
}
encoder->native_mode = lvds->native_mode; encoder->native_mode = lvds->native_mode;
......
...@@ -326,6 +326,34 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr ...@@ -326,6 +326,34 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
} }
} }
if (property == rdev->mode_info.underscan_hborder_property) {
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->underscan_hborder != val) {
radeon_encoder->underscan_hborder = val;
radeon_property_change_mode(&radeon_encoder->base);
}
}
if (property == rdev->mode_info.underscan_vborder_property) {
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->underscan_vborder != val) {
radeon_encoder->underscan_vborder = val;
radeon_property_change_mode(&radeon_encoder->base);
}
}
if (property == rdev->mode_info.tv_std_property) { if (property == rdev->mode_info.tv_std_property) {
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC); encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC);
if (!encoder) { if (!encoder) {
...@@ -1153,10 +1181,17 @@ radeon_add_atom_connector(struct drm_device *dev, ...@@ -1153,10 +1181,17 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base, drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property, rdev->mode_info.coherent_mode_property,
1); 1);
if (ASIC_IS_AVIVO(rdev)) if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base, drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property, rdev->mode_info.underscan_property,
UNDERSCAN_AUTO); UNDERSCAN_AUTO);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_vborder_property,
0);
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) { if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true; radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base, drm_connector_attach_property(&radeon_connector->base,
...@@ -1181,10 +1216,17 @@ radeon_add_atom_connector(struct drm_device *dev, ...@@ -1181,10 +1216,17 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base, drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property, rdev->mode_info.coherent_mode_property,
1); 1);
if (ASIC_IS_AVIVO(rdev)) if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base, drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property, rdev->mode_info.underscan_property,
UNDERSCAN_AUTO); UNDERSCAN_AUTO);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_vborder_property,
0);
}
subpixel_order = SubPixelHorizontalRGB; subpixel_order = SubPixelHorizontalRGB;
break; break;
case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_DisplayPort:
...@@ -1212,10 +1254,17 @@ radeon_add_atom_connector(struct drm_device *dev, ...@@ -1212,10 +1254,17 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base, drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property, rdev->mode_info.coherent_mode_property,
1); 1);
if (ASIC_IS_AVIVO(rdev)) if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base, drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property, rdev->mode_info.underscan_property,
UNDERSCAN_AUTO); UNDERSCAN_AUTO);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_vborder_property,
0);
}
break; break;
case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_Composite:
......
...@@ -118,22 +118,25 @@ static void radeon_show_cursor(struct drm_crtc *crtc) ...@@ -118,22 +118,25 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
} }
static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
uint32_t gpu_addr) uint64_t gpu_addr)
{ {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private; struct radeon_device *rdev = crtc->dev->dev_private;
if (ASIC_IS_DCE4(rdev)) { if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); upper_32_bits(gpu_addr));
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
gpu_addr & 0xffffffff);
} else if (ASIC_IS_AVIVO(rdev)) { } else if (ASIC_IS_AVIVO(rdev)) {
if (rdev->family >= CHIP_RV770) { if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id) if (radeon_crtc->crtc_id)
WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0); WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
else else
WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0); WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
} }
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
gpu_addr & 0xffffffff);
} else { } else {
radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
/* offset is from DISP(2)_BASE_ADDRESS */ /* offset is from DISP(2)_BASE_ADDRESS */
......
...@@ -117,9 +117,10 @@ void radeon_scratch_init(struct radeon_device *rdev) ...@@ -117,9 +117,10 @@ void radeon_scratch_init(struct radeon_device *rdev)
} else { } else {
rdev->scratch.num_reg = 7; rdev->scratch.num_reg = 7;
} }
rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
for (i = 0; i < rdev->scratch.num_reg; i++) { for (i = 0; i < rdev->scratch.num_reg; i++) {
rdev->scratch.free[i] = true; rdev->scratch.free[i] = true;
rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4); rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
} }
} }
...@@ -149,6 +150,86 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) ...@@ -149,6 +150,86 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
} }
} }
void radeon_wb_disable(struct radeon_device *rdev)
{
int r;
if (rdev->wb.wb_obj) {
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0))
return;
radeon_bo_kunmap(rdev->wb.wb_obj);
radeon_bo_unpin(rdev->wb.wb_obj);
radeon_bo_unreserve(rdev->wb.wb_obj);
}
rdev->wb.enabled = false;
}
void radeon_wb_fini(struct radeon_device *rdev)
{
radeon_wb_disable(rdev);
if (rdev->wb.wb_obj) {
radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
}
int radeon_wb_init(struct radeon_device *rdev)
{
int r;
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
}
}
r = radeon_bo_reserve(rdev->wb.wb_obj, false);
if (unlikely(r != 0)) {
radeon_wb_fini(rdev);
return r;
}
r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->wb.gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->wb.wb_obj);
dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
radeon_wb_fini(rdev);
return r;
}
r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
radeon_wb_fini(rdev);
return r;
}
/* disable event_write fences */
rdev->wb.use_event = false;
/* disabled via module param */
if (radeon_no_wb == 1)
rdev->wb.enabled = false;
else {
/* often unreliable on AGP */
if (rdev->flags & RADEON_IS_AGP) {
rdev->wb.enabled = false;
} else {
rdev->wb.enabled = true;
/* event_write fences are only available on r600+ */
if (rdev->family >= CHIP_R600)
rdev->wb.use_event = true;
}
}
dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
return 0;
}
/** /**
* radeon_vram_location - try to find VRAM location * radeon_vram_location - try to find VRAM location
* @rdev: radeon device structure holding all necessary informations * @rdev: radeon device structure holding all necessary informations
......
This diff is collapsed.
...@@ -93,7 +93,6 @@ int radeon_benchmarking = 0; ...@@ -93,7 +93,6 @@ int radeon_benchmarking = 0;
int radeon_testing = 0; int radeon_testing = 0;
int radeon_connector_table = 0; int radeon_connector_table = 0;
int radeon_tv = 1; int radeon_tv = 1;
int radeon_new_pll = -1;
int radeon_audio = 1; int radeon_audio = 1;
int radeon_disp_priority = 0; int radeon_disp_priority = 0;
int radeon_hw_i2c = 0; int radeon_hw_i2c = 0;
...@@ -131,9 +130,6 @@ module_param_named(connector_table, radeon_connector_table, int, 0444); ...@@ -131,9 +130,6 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444); module_param_named(tv, radeon_tv, int, 0444);
MODULE_PARM_DESC(new_pll, "Select new PLL code");
module_param_named(new_pll, radeon_new_pll, int, 0444);
MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
module_param_named(audio, radeon_audio, int, 0444); module_param_named(audio, radeon_audio, int, 0444);
......
...@@ -529,9 +529,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) ...@@ -529,9 +529,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= (1 << 1); args.v1.ucMisc |= (1 << 1);
} else { } else {
if (dig->linkb) if (dig->linkb)
...@@ -558,18 +558,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) ...@@ -558,18 +558,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v2.ucTemporal = 0; args.v2.ucTemporal = 0;
args.v2.ucFRC = 0; args.v2.ucFRC = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) { if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
} }
if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) { if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
} }
} else { } else {
......
...@@ -72,7 +72,15 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) ...@@ -72,7 +72,15 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
bool wake = false; bool wake = false;
unsigned long cjiffies; unsigned long cjiffies;
seq = RREG32(rdev->fence_drv.scratch_reg); if (rdev->wb.enabled) {
u32 scratch_index;
if (rdev->wb.use_event)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
seq = rdev->wb.wb[scratch_index/4];
} else
seq = RREG32(rdev->fence_drv.scratch_reg);
if (seq != rdev->fence_drv.last_seq) { if (seq != rdev->fence_drv.last_seq) {
rdev->fence_drv.last_seq = seq; rdev->fence_drv.last_seq = seq;
rdev->fence_drv.last_jiffies = jiffies; rdev->fence_drv.last_jiffies = jiffies;
......
...@@ -744,15 +744,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) ...@@ -744,15 +744,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
pll = &rdev->clock.p1pll; pll = &rdev->clock.p1pll;
pll->flags = RADEON_PLL_LEGACY; pll->flags = RADEON_PLL_LEGACY;
if (radeon_new_pll == 1)
pll->algo = PLL_ALGO_NEW;
else
pll->algo = PLL_ALGO_LEGACY;
if (mode->clock > 200000) /* range limits??? */
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) { if (encoder->crtc == crtc) {
......
...@@ -139,22 +139,10 @@ struct radeon_tmds_pll { ...@@ -139,22 +139,10 @@ struct radeon_tmds_pll {
#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1) #define RADEON_PLL_NO_ODD_POST_DIV (1 << 1)
#define RADEON_PLL_USE_REF_DIV (1 << 2) #define RADEON_PLL_USE_REF_DIV (1 << 2)
#define RADEON_PLL_LEGACY (1 << 3) #define RADEON_PLL_LEGACY (1 << 3)
#define RADEON_PLL_PREFER_LOW_REF_DIV (1 << 4) #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 4)
#define RADEON_PLL_PREFER_HIGH_REF_DIV (1 << 5) #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 5)
#define RADEON_PLL_PREFER_LOW_FB_DIV (1 << 6) #define RADEON_PLL_USE_POST_DIV (1 << 6)
#define RADEON_PLL_PREFER_HIGH_FB_DIV (1 << 7) #define RADEON_PLL_IS_LCD (1 << 7)
#define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8)
#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
#define RADEON_PLL_USE_POST_DIV (1 << 12)
#define RADEON_PLL_IS_LCD (1 << 13)
/* pll algo */
enum radeon_pll_algo {
PLL_ALGO_LEGACY,
PLL_ALGO_NEW
};
struct radeon_pll { struct radeon_pll {
/* reference frequency */ /* reference frequency */
...@@ -188,8 +176,6 @@ struct radeon_pll { ...@@ -188,8 +176,6 @@ struct radeon_pll {
/* pll id */ /* pll id */
uint32_t id; uint32_t id;
/* pll algo */
enum radeon_pll_algo algo;
}; };
struct radeon_i2c_chan { struct radeon_i2c_chan {
...@@ -241,6 +227,8 @@ struct radeon_mode_info { ...@@ -241,6 +227,8 @@ struct radeon_mode_info {
struct drm_property *tmds_pll_property; struct drm_property *tmds_pll_property;
/* underscan */ /* underscan */
struct drm_property *underscan_property; struct drm_property *underscan_property;
struct drm_property *underscan_hborder_property;
struct drm_property *underscan_vborder_property;
/* hardcoded DFP edid from BIOS */ /* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid; struct edid *bios_hardcoded_edid;
...@@ -337,22 +325,24 @@ struct radeon_encoder_ext_tmds { ...@@ -337,22 +325,24 @@ struct radeon_encoder_ext_tmds {
struct radeon_atom_ss { struct radeon_atom_ss {
uint16_t percentage; uint16_t percentage;
uint8_t type; uint8_t type;
uint8_t step; uint16_t step;
uint8_t delay; uint8_t delay;
uint8_t range; uint8_t range;
uint8_t refdiv; uint8_t refdiv;
/* asic_ss */
uint16_t rate;
uint16_t amount;
}; };
struct radeon_encoder_atom_dig { struct radeon_encoder_atom_dig {
bool linkb; bool linkb;
/* atom dig */ /* atom dig */
bool coherent_mode; bool coherent_mode;
int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
/* atom lvds */ /* atom lvds/edp */
uint32_t lvds_misc; uint32_t lcd_misc;
uint16_t panel_pwr_delay; uint16_t panel_pwr_delay;
enum radeon_pll_algo pll_algo; uint32_t lcd_ss_id;
struct radeon_atom_ss *ss;
/* panel mode */ /* panel mode */
struct drm_display_mode native_mode; struct drm_display_mode native_mode;
}; };
...@@ -371,6 +361,8 @@ struct radeon_encoder { ...@@ -371,6 +361,8 @@ struct radeon_encoder {
uint32_t pixel_clock; uint32_t pixel_clock;
enum radeon_rmx_type rmx_type; enum radeon_rmx_type rmx_type;
enum radeon_underscan_type underscan_type; enum radeon_underscan_type underscan_type;
uint32_t underscan_hborder;
uint32_t underscan_vborder;
struct drm_display_mode native_mode; struct drm_display_mode native_mode;
void *enc_priv; void *enc_priv;
int audio_polling_active; int audio_polling_active;
...@@ -437,6 +429,11 @@ struct radeon_framebuffer { ...@@ -437,6 +429,11 @@ struct radeon_framebuffer {
struct drm_gem_object *obj; struct drm_gem_object *obj;
}; };
/* radeon_get_crtc_scanoutpos() return flags */
#define RADEON_SCANOUTPOS_VALID (1 << 0)
#define RADEON_SCANOUTPOS_INVBL (1 << 1)
#define RADEON_SCANOUTPOS_ACCURATE (1 << 2)
extern enum radeon_tv_std extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev); radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std extern enum radeon_tv_std
...@@ -492,6 +489,13 @@ extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); ...@@ -492,6 +489,13 @@ extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id);
extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock);
extern void radeon_compute_pll(struct radeon_pll *pll, extern void radeon_compute_pll(struct radeon_pll *pll,
uint64_t freq, uint64_t freq,
uint32_t *dot_clock_p, uint32_t *dot_clock_p,
...@@ -543,6 +547,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -543,6 +547,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y); int x, int y);
extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos);
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid * extern struct edid *
radeon_combios_get_hardcoded_edid(struct radeon_device *rdev); radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
......
...@@ -712,73 +712,21 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) ...@@ -712,73 +712,21 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
static bool radeon_pm_in_vbl(struct radeon_device *rdev) static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{ {
u32 stat_crtc = 0, vbl = 0, position = 0; int crtc, vpos, hpos, vbl_status;
bool in_vbl = true; bool in_vbl = true;
if (ASIC_IS_DCE4(rdev)) { /* Iterate over all active crtc's. All crtc's must be in vblank,
if (rdev->pm.active_crtcs & (1 << 0)) { * otherwise return in_vbl == false.
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + */
EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + if (rdev->pm.active_crtcs & (1 << crtc)) {
EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);
} if ((vbl_status & RADEON_SCANOUTPOS_VALID) &&
if (rdev->pm.active_crtcs & (1 << 1)) { !(vbl_status & RADEON_SCANOUTPOS_INVBL))
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
}
if (rdev->pm.active_crtcs & (1 << 2)) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
}
if (rdev->pm.active_crtcs & (1 << 3)) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
}
if (rdev->pm.active_crtcs & (1 << 4)) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
}
if (rdev->pm.active_crtcs & (1 << 5)) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
}
} else if (ASIC_IS_AVIVO(rdev)) {
if (rdev->pm.active_crtcs & (1 << 0)) {
vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
}
if (rdev->pm.active_crtcs & (1 << 1)) {
vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
}
if (position < vbl && position > 1)
in_vbl = false;
} else {
if (rdev->pm.active_crtcs & (1 << 0)) {
stat_crtc = RREG32(RADEON_CRTC_STATUS);
if (!(stat_crtc & 1))
in_vbl = false;
}
if (rdev->pm.active_crtcs & (1 << 1)) {
stat_crtc = RREG32(RADEON_CRTC2_STATUS);
if (!(stat_crtc & 1))
in_vbl = false; in_vbl = false;
} }
} }
if (position < vbl && position > 1)
in_vbl = false;
return in_vbl; return in_vbl;
} }
......
...@@ -247,10 +247,14 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) ...@@ -247,10 +247,14 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
*/ */
void radeon_ring_free_size(struct radeon_device *rdev) void radeon_ring_free_size(struct radeon_device *rdev)
{ {
if (rdev->family >= CHIP_R600) if (rdev->wb.enabled)
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4];
else else {
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); if (rdev->family >= CHIP_R600)
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
else
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
}
/* This works because ring_size is a power of 2 */ /* This works because ring_size is a power of 2 */
rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4)); rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
rdev->cp.ring_free_dw -= rdev->cp.wptr; rdev->cp.ring_free_dw -= rdev->cp.wptr;
......
...@@ -397,6 +397,12 @@ static int rs400_startup(struct radeon_device *rdev) ...@@ -397,6 +397,12 @@ static int rs400_startup(struct radeon_device *rdev)
r = rs400_gart_enable(rdev); r = rs400_gart_enable(rdev);
if (r) if (r)
return r; return r;
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
return r;
/* Enable IRQ */ /* Enable IRQ */
r100_irq_set(rdev); r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
...@@ -406,9 +412,6 @@ static int rs400_startup(struct radeon_device *rdev) ...@@ -406,9 +412,6 @@ static int rs400_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r); dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r; return r;
} }
r = r100_wb_init(rdev);
if (r)
dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev); r = r100_ib_init(rdev);
if (r) { if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r); dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
...@@ -443,7 +446,7 @@ int rs400_resume(struct radeon_device *rdev) ...@@ -443,7 +446,7 @@ int rs400_resume(struct radeon_device *rdev)
int rs400_suspend(struct radeon_device *rdev) int rs400_suspend(struct radeon_device *rdev)
{ {
r100_cp_disable(rdev); r100_cp_disable(rdev);
r100_wb_disable(rdev); radeon_wb_disable(rdev);
r100_irq_disable(rdev); r100_irq_disable(rdev);
rs400_gart_disable(rdev); rs400_gart_disable(rdev);
return 0; return 0;
...@@ -452,7 +455,7 @@ int rs400_suspend(struct radeon_device *rdev) ...@@ -452,7 +455,7 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev) void rs400_fini(struct radeon_device *rdev)
{ {
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
radeon_gem_fini(rdev); radeon_gem_fini(rdev);
rs400_gart_fini(rdev); rs400_gart_fini(rdev);
...@@ -526,7 +529,7 @@ int rs400_init(struct radeon_device *rdev) ...@@ -526,7 +529,7 @@ int rs400_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */ /* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n"); dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
rs400_gart_fini(rdev); rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
......
...@@ -795,6 +795,12 @@ static int rs600_startup(struct radeon_device *rdev) ...@@ -795,6 +795,12 @@ static int rs600_startup(struct radeon_device *rdev)
r = rs600_gart_enable(rdev); r = rs600_gart_enable(rdev);
if (r) if (r)
return r; return r;
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
return r;
/* Enable IRQ */ /* Enable IRQ */
rs600_irq_set(rdev); rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
...@@ -804,9 +810,6 @@ static int rs600_startup(struct radeon_device *rdev) ...@@ -804,9 +810,6 @@ static int rs600_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r); dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r; return r;
} }
r = r100_wb_init(rdev);
if (r)
dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev); r = r100_ib_init(rdev);
if (r) { if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r); dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
...@@ -847,7 +850,7 @@ int rs600_suspend(struct radeon_device *rdev) ...@@ -847,7 +850,7 @@ int rs600_suspend(struct radeon_device *rdev)
{ {
r600_audio_fini(rdev); r600_audio_fini(rdev);
r100_cp_disable(rdev); r100_cp_disable(rdev);
r100_wb_disable(rdev); radeon_wb_disable(rdev);
rs600_irq_disable(rdev); rs600_irq_disable(rdev);
rs600_gart_disable(rdev); rs600_gart_disable(rdev);
return 0; return 0;
...@@ -857,7 +860,7 @@ void rs600_fini(struct radeon_device *rdev) ...@@ -857,7 +860,7 @@ void rs600_fini(struct radeon_device *rdev)
{ {
r600_audio_fini(rdev); r600_audio_fini(rdev);
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
radeon_gem_fini(rdev); radeon_gem_fini(rdev);
rs600_gart_fini(rdev); rs600_gart_fini(rdev);
...@@ -931,7 +934,7 @@ int rs600_init(struct radeon_device *rdev) ...@@ -931,7 +934,7 @@ int rs600_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */ /* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n"); dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
rs600_gart_fini(rdev); rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
......
...@@ -615,6 +615,12 @@ static int rs690_startup(struct radeon_device *rdev) ...@@ -615,6 +615,12 @@ static int rs690_startup(struct radeon_device *rdev)
r = rs400_gart_enable(rdev); r = rs400_gart_enable(rdev);
if (r) if (r)
return r; return r;
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
return r;
/* Enable IRQ */ /* Enable IRQ */
rs600_irq_set(rdev); rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
...@@ -624,9 +630,6 @@ static int rs690_startup(struct radeon_device *rdev) ...@@ -624,9 +630,6 @@ static int rs690_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r); dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r; return r;
} }
r = r100_wb_init(rdev);
if (r)
dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev); r = r100_ib_init(rdev);
if (r) { if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r); dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
...@@ -667,7 +670,7 @@ int rs690_suspend(struct radeon_device *rdev) ...@@ -667,7 +670,7 @@ int rs690_suspend(struct radeon_device *rdev)
{ {
r600_audio_fini(rdev); r600_audio_fini(rdev);
r100_cp_disable(rdev); r100_cp_disable(rdev);
r100_wb_disable(rdev); radeon_wb_disable(rdev);
rs600_irq_disable(rdev); rs600_irq_disable(rdev);
rs400_gart_disable(rdev); rs400_gart_disable(rdev);
return 0; return 0;
...@@ -677,7 +680,7 @@ void rs690_fini(struct radeon_device *rdev) ...@@ -677,7 +680,7 @@ void rs690_fini(struct radeon_device *rdev)
{ {
r600_audio_fini(rdev); r600_audio_fini(rdev);
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
radeon_gem_fini(rdev); radeon_gem_fini(rdev);
rs400_gart_fini(rdev); rs400_gart_fini(rdev);
...@@ -752,7 +755,7 @@ int rs690_init(struct radeon_device *rdev) ...@@ -752,7 +755,7 @@ int rs690_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */ /* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n"); dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
rs400_gart_fini(rdev); rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
......
...@@ -386,6 +386,12 @@ static int rv515_startup(struct radeon_device *rdev) ...@@ -386,6 +386,12 @@ static int rv515_startup(struct radeon_device *rdev)
if (r) if (r)
return r; return r;
} }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
return r;
/* Enable IRQ */ /* Enable IRQ */
rs600_irq_set(rdev); rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
...@@ -395,9 +401,6 @@ static int rv515_startup(struct radeon_device *rdev) ...@@ -395,9 +401,6 @@ static int rv515_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failled initializing CP (%d).\n", r); dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r; return r;
} }
r = r100_wb_init(rdev);
if (r)
dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
r = r100_ib_init(rdev); r = r100_ib_init(rdev);
if (r) { if (r) {
dev_err(rdev->dev, "failled initializing IB (%d).\n", r); dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
...@@ -431,7 +434,7 @@ int rv515_resume(struct radeon_device *rdev) ...@@ -431,7 +434,7 @@ int rv515_resume(struct radeon_device *rdev)
int rv515_suspend(struct radeon_device *rdev) int rv515_suspend(struct radeon_device *rdev)
{ {
r100_cp_disable(rdev); r100_cp_disable(rdev);
r100_wb_disable(rdev); radeon_wb_disable(rdev);
rs600_irq_disable(rdev); rs600_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCIE) if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev); rv370_pcie_gart_disable(rdev);
...@@ -447,7 +450,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev) ...@@ -447,7 +450,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev) void rv515_fini(struct radeon_device *rdev)
{ {
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
radeon_gem_fini(rdev); radeon_gem_fini(rdev);
rv370_pcie_gart_fini(rdev); rv370_pcie_gart_fini(rdev);
...@@ -527,7 +530,7 @@ int rv515_init(struct radeon_device *rdev) ...@@ -527,7 +530,7 @@ int rv515_init(struct radeon_device *rdev)
/* Somethings want wront with the accel init stop accel */ /* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n"); dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev); r100_cp_fini(rdev);
r100_wb_fini(rdev); radeon_wb_fini(rdev);
r100_ib_fini(rdev); r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev); rv370_pcie_gart_fini(rdev);
......
...@@ -268,6 +268,7 @@ static void rv770_mc_program(struct radeon_device *rdev) ...@@ -268,6 +268,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
void r700_cp_stop(struct radeon_device *rdev) void r700_cp_stop(struct radeon_device *rdev)
{ {
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
} }
static int rv770_cp_load_microcode(struct radeon_device *rdev) static int rv770_cp_load_microcode(struct radeon_device *rdev)
...@@ -1028,19 +1029,12 @@ static int rv770_startup(struct radeon_device *rdev) ...@@ -1028,19 +1029,12 @@ static int rv770_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL; rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
} }
/* pin copy shader into vram */
if (rdev->r600_blit.shader_obj) { /* allocate wb buffer */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); r = radeon_wb_init(rdev);
if (unlikely(r != 0)) if (r)
return r; return r;
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_gpu_addr);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("failed to pin blit object %d\n", r);
return r;
}
}
/* Enable IRQ */ /* Enable IRQ */
r = r600_irq_init(rdev); r = r600_irq_init(rdev);
if (r) { if (r) {
...@@ -1059,8 +1053,7 @@ static int rv770_startup(struct radeon_device *rdev) ...@@ -1059,8 +1053,7 @@ static int rv770_startup(struct radeon_device *rdev)
r = r600_cp_resume(rdev); r = r600_cp_resume(rdev);
if (r) if (r)
return r; return r;
/* write back buffer are not vital so don't worry about failure */
r600_wb_enable(rdev);
return 0; return 0;
} }
...@@ -1106,7 +1099,7 @@ int rv770_suspend(struct radeon_device *rdev) ...@@ -1106,7 +1099,7 @@ int rv770_suspend(struct radeon_device *rdev)
r700_cp_stop(rdev); r700_cp_stop(rdev);
rdev->cp.ready = false; rdev->cp.ready = false;
r600_irq_suspend(rdev); r600_irq_suspend(rdev);
r600_wb_disable(rdev); radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev); rv770_pcie_gart_disable(rdev);
/* unpin shaders bo */ /* unpin shaders bo */
if (rdev->r600_blit.shader_obj) { if (rdev->r600_blit.shader_obj) {
...@@ -1201,8 +1194,8 @@ int rv770_init(struct radeon_device *rdev) ...@@ -1201,8 +1194,8 @@ int rv770_init(struct radeon_device *rdev)
if (r) { if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n"); dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev); r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev); r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev); rv770_pcie_gart_fini(rdev);
rdev->accel_working = false; rdev->accel_working = false;
...@@ -1234,8 +1227,8 @@ void rv770_fini(struct radeon_device *rdev) ...@@ -1234,8 +1227,8 @@ void rv770_fini(struct radeon_device *rdev)
{ {
r600_blit_fini(rdev); r600_blit_fini(rdev);
r700_cp_fini(rdev); r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev); r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_irq_kms_fini(rdev); radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev); rv770_pcie_gart_fini(rdev);
rv770_vram_scratch_fini(rdev); rv770_vram_scratch_fini(rdev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment