Commit 9299795c authored by Dave Airlie

Merge remote branch 'korg/drm-radeon-next' into drm-linus

* korg/drm-radeon-next:
  drm/radeon/kms: fix legacy get_engine/memory clock
  drm/radeon/kms/atom: atom parser fixes
  drm/radeon/kms: clean up atombios pll code
  drm/radeon/kms: clean up pll struct
  drm/radeon/kms/atom: fix crtc lock ordering
  drm/radeon: r6xx/r7xx possible security issue, system ram access
  drm/radeon/kms: r600/r700 don't test ib if ib initialization fails
  drm/radeon/kms: Forbid creation of framebuffer with no valid GEM object
  drm/radeon/kms: r600 handle irq vector ring overflow
  drm/radeon/kms: r600/r700 don't process IRQ if not initialized
  drm/radeon/kms: r600/r700 disable irq at suspend
  drm/radeon/kms/r4xx: cleanup atom path
  drm/radeon/kms: fix atombios_crtc_set_base
  drm/radeon/kms/atom: upstream parser updates
  drm/radeon/kms/atom: fix some parser bugs
  drm/radeon/kms: fix hardcoded mmio size in register functions
  drm/radeon/kms/r100: fix bug in CS parser
  drm/radeon/kms/r200: fix bug in CS parser
  drm/radeon/kms/r200: fix bug in CS parser
parents 8d586fe6 38678d35
@@ -246,6 +246,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
 		case ATOM_WS_ATTRIBUTES:
 			val = gctx->io_attr;
 			break;
+		case ATOM_WS_REGPTR:
+			val = gctx->reg_block;
+			break;
 		default:
 			val = ctx->ws[idx];
 		}
@@ -385,6 +388,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
 	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
 }
 
+static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
+{
+	uint32_t val = 0xCDCDCDCD;
+
+	switch (align) {
+	case ATOM_SRC_DWORD:
+		val = U32(*ptr);
+		(*ptr) += 4;
+		break;
+	case ATOM_SRC_WORD0:
+	case ATOM_SRC_WORD8:
+	case ATOM_SRC_WORD16:
+		val = U16(*ptr);
+		(*ptr) += 2;
+		break;
+	case ATOM_SRC_BYTE0:
+	case ATOM_SRC_BYTE8:
+	case ATOM_SRC_BYTE16:
+	case ATOM_SRC_BYTE24:
+		val = U8(*ptr);
+		(*ptr)++;
+		break;
+	}
+	return val;
+}
+
 static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
 			     int *ptr, uint32_t *saved, int print)
 {
@@ -482,6 +511,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
 		case ATOM_WS_ATTRIBUTES:
 			gctx->io_attr = val;
 			break;
+		case ATOM_WS_REGPTR:
+			gctx->reg_block = val;
+			break;
 		default:
 			ctx->ws[idx] = val;
 		}
@@ -677,7 +709,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
 	SDEBUG(" dst: ");
 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 	SDEBUG(" src1: ");
-	src1 = atom_get_src(ctx, attr, ptr);
+	src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
 	SDEBUG(" src2: ");
 	src2 = atom_get_src(ctx, attr, ptr);
 	dst &= src1;
@@ -809,6 +841,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
 	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
 }
 
+static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	SDEBUG(" dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+	SDEBUG(" shift: %d\n", shift);
+	dst <<= shift;
+	SDEBUG(" dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	SDEBUG(" dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+	SDEBUG(" shift: %d\n", shift);
+	dst >>= shift;
+	SDEBUG(" dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
 static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
 {
 	uint8_t attr = U8((*ptr)++), shift;
@@ -818,7 +882,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
 	attr |= atom_def_dst[attr >> 3] << 6;
 	SDEBUG(" dst: ");
 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
-	shift = U8((*ptr)++);
+	shift = atom_get_src(ctx, attr, ptr);
 	SDEBUG(" shift: %d\n", shift);
 	dst <<= shift;
 	SDEBUG(" dst: ");
@@ -834,7 +898,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
 	attr |= atom_def_dst[attr >> 3] << 6;
 	SDEBUG(" dst: ");
 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
-	shift = U8((*ptr)++);
+	shift = atom_get_src(ctx, attr, ptr);
 	SDEBUG(" shift: %d\n", shift);
 	dst >>= shift;
 	SDEBUG(" dst: ");
@@ -937,18 +1001,18 @@ static struct {
 	atom_op_or, ATOM_ARG_FB}, {
 	atom_op_or, ATOM_ARG_PLL}, {
 	atom_op_or, ATOM_ARG_MC}, {
-	atom_op_shl, ATOM_ARG_REG}, {
-	atom_op_shl, ATOM_ARG_PS}, {
-	atom_op_shl, ATOM_ARG_WS}, {
-	atom_op_shl, ATOM_ARG_FB}, {
-	atom_op_shl, ATOM_ARG_PLL}, {
-	atom_op_shl, ATOM_ARG_MC}, {
-	atom_op_shr, ATOM_ARG_REG}, {
-	atom_op_shr, ATOM_ARG_PS}, {
-	atom_op_shr, ATOM_ARG_WS}, {
-	atom_op_shr, ATOM_ARG_FB}, {
-	atom_op_shr, ATOM_ARG_PLL}, {
-	atom_op_shr, ATOM_ARG_MC}, {
+	atom_op_shift_left, ATOM_ARG_REG}, {
+	atom_op_shift_left, ATOM_ARG_PS}, {
+	atom_op_shift_left, ATOM_ARG_WS}, {
+	atom_op_shift_left, ATOM_ARG_FB}, {
+	atom_op_shift_left, ATOM_ARG_PLL}, {
+	atom_op_shift_left, ATOM_ARG_MC}, {
+	atom_op_shift_right, ATOM_ARG_REG}, {
+	atom_op_shift_right, ATOM_ARG_PS}, {
+	atom_op_shift_right, ATOM_ARG_WS}, {
+	atom_op_shift_right, ATOM_ARG_FB}, {
+	atom_op_shift_right, ATOM_ARG_PLL}, {
+	atom_op_shift_right, ATOM_ARG_MC}, {
 	atom_op_mul, ATOM_ARG_REG}, {
 	atom_op_mul, ATOM_ARG_PS}, {
 	atom_op_mul, ATOM_ARG_WS}, {
@@ -1058,8 +1122,6 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 
 	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
 
-	/* reset reg block */
-	ctx->reg_block = 0;
 	ectx.ctx = ctx;
 	ectx.ps_shift = ps / 4;
 	ectx.start = base;
@@ -1096,6 +1158,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
 {
 	mutex_lock(&ctx->mutex);
+	/* reset reg block */
+	ctx->reg_block = 0;
+	/* reset fb window */
+	ctx->fb_base = 0;
+	/* reset io mode */
+	ctx->io_mode = ATOM_IO_MM;
 	atom_execute_table_locked(ctx, index, params);
 	mutex_unlock(&ctx->mutex);
 }
...
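The new atom_get_src_direct() above fetches an immediate operand straight from the ATOM byte stream and advances the parser's pointer by the operand width. A minimal user-space sketch of that access pattern (illustrative helpers only, not the driver's U8/U16/U32 macros, which read from the mapped BIOS image):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative little-endian fetch helpers; the driver's U8/U16/U32 macros
 * do the equivalent against the mapped ATOM BIOS image. */
static uint32_t get_u8(const uint8_t *bios, int *ptr)
{
	return bios[(*ptr)++];
}

static uint32_t get_u16(const uint8_t *bios, int *ptr)
{
	uint32_t val = bios[*ptr] | ((uint32_t)bios[*ptr + 1] << 8);
	*ptr += 2;
	return val;
}

static uint32_t get_u32(const uint8_t *bios, int *ptr)
{
	uint32_t val = bios[*ptr] | ((uint32_t)bios[*ptr + 1] << 8) |
		       ((uint32_t)bios[*ptr + 2] << 16) | ((uint32_t)bios[*ptr + 3] << 24);
	*ptr += 4;
	return val;
}

int main(void)
{
	const uint8_t stream[] = { 0x01, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12 };
	int ptr = 0;

	printf("byte  = 0x%02X\n", get_u8(stream, &ptr));   /* 0x01   */
	printf("word  = 0x%04X\n", get_u16(stream, &ptr));  /* 0x1234 */
	printf("dword = 0x%08X\n", get_u32(stream, &ptr));  /* 0x12345678 */
	return 0;
}
```

The sketch only covers the raw fetch and pointer advance; how the interpreter routes the value into the destination operand is handled elsewhere in atom.c.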
@@ -91,6 +91,7 @@
 #define ATOM_WS_AND_MASK 0x45
 #define ATOM_WS_FB_WINDOW 0x46
 #define ATOM_WS_ATTRIBUTES 0x47
+#define ATOM_WS_REGPTR 0x48
 
 #define ATOM_IIO_NOP 0
 #define ATOM_IIO_START 1
...
This diff is collapsed.
@@ -1504,6 +1504,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
 			return -EINVAL;
 		}
+		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
 		track->immd_dwords = pkt->count - 1;
 		r = r100_cs_track_check(p->rdev, track);
...
@@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		case 5:
 		case 6:
 		case 7:
+			/* 1D/2D */
 			track->textures[i].tex_coord_type = 0;
 			break;
 		case 1:
-			track->textures[i].tex_coord_type = 1;
+			/* CUBE */
+			track->textures[i].tex_coord_type = 2;
 			break;
 		case 2:
-			track->textures[i].tex_coord_type = 2;
+			/* 3D */
+			track->textures[i].tex_coord_type = 1;
 			break;
 		}
 		break;
...
@@ -1954,6 +1954,7 @@ int r600_suspend(struct radeon_device *rdev)
 	/* FIXME: we should wait for ring to be empty */
 	r600_cp_stop(rdev);
 	rdev->cp.ready = false;
+	r600_irq_suspend(rdev);
 	r600_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
@@ -2063,13 +2064,14 @@ int r600_init(struct radeon_device *rdev)
 	if (rdev->accel_working) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-			rdev->accel_working = false;
-		}
-		r = r600_ib_test(rdev);
-		if (r) {
-			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
 			rdev->accel_working = false;
+		} else {
+			r = r600_ib_test(rdev);
+			if (r) {
+				dev_err(rdev->dev, "IB test failed (%d).\n", r);
+				rdev->accel_working = false;
+			}
 		}
 	}
@@ -2200,14 +2202,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
 	rb_bufsz = drm_order(ring_size / 4);
 	ring_size = (1 << rb_bufsz) * 4;
 	rdev->ih.ring_size = ring_size;
-	rdev->ih.align_mask = 4 - 1;
+	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
+	rdev->ih.rptr = 0;
 }
 
-static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
+static int r600_ih_ring_alloc(struct radeon_device *rdev)
 {
 	int r;
 
-	rdev->ih.ring_size = ring_size;
 	/* Allocate ring buffer */
 	if (rdev->ih.ring_obj == NULL) {
 		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
@@ -2237,9 +2239,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
 			return r;
 		}
 	}
-	rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
-	rdev->ih.rptr = 0;
-
 	return 0;
 }
@@ -2389,7 +2388,7 @@ int r600_irq_init(struct radeon_device *rdev)
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
 
 	/* allocate ring */
-	ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
+	ret = r600_ih_ring_alloc(rdev);
 	if (ret)
 		return ret;
@@ -2452,10 +2451,15 @@ int r600_irq_init(struct radeon_device *rdev)
 	return ret;
 }
 
-void r600_irq_fini(struct radeon_device *rdev)
+void r600_irq_suspend(struct radeon_device *rdev)
 {
 	r600_disable_interrupts(rdev);
 	r600_rlc_stop(rdev);
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+	r600_irq_suspend(rdev);
 	r600_ih_ring_fini(rdev);
 }
@@ -2470,8 +2474,12 @@ int r600_irq_set(struct radeon_device *rdev)
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */
-	if (!rdev->ih.enabled)
+	if (!rdev->ih.enabled) {
+		r600_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		r600_disable_interrupt_state(rdev);
 		return 0;
+	}
 
 	if (ASIC_IS_DCE3(rdev)) {
 		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2641,16 +2649,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
 	wptr = RREG32(IH_RB_WPTR);
 
 	if (wptr & RB_OVERFLOW) {
-		WARN_ON(1);
-		/* XXX deal with overflow */
-		DRM_ERROR("IH RB overflow\n");
+		/* When a ring buffer overflow happen start parsing interrupt
+		 * from the last not overwritten vector (wptr + 16). Hopefully
+		 * this should allow us to catchup.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+			wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
 	}
-	wptr = wptr & WPTR_OFFSET_MASK;
-	return wptr;
+	return (wptr & rdev->ih.ptr_mask);
 }
 
 /* r600 IV Ring
@@ -2686,12 +2696,13 @@ int r600_irq_process(struct radeon_device *rdev)
 	u32 wptr = r600_get_ih_wptr(rdev);
 	u32 rptr = rdev->ih.rptr;
 	u32 src_id, src_data;
-	u32 last_entry = rdev->ih.ring_size - 16;
 	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
 	unsigned long flags;
 	bool queue_hotplug = false;
 
 	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+	if (!rdev->ih.enabled)
+		return IRQ_NONE;
 
 	spin_lock_irqsave(&rdev->ih.lock, flags);
@@ -2820,10 +2831,8 @@ int r600_irq_process(struct radeon_device *rdev)
 		}
 
 		/* wptr/rptr are in bytes! */
-		if (rptr == last_entry)
-			rptr = 0;
-		else
-			rptr += 16;
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
 	}
 	/* make sure wptr hasn't changed while processing */
 	wptr = r600_get_ih_wptr(rdev);
...
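The IH hunks above make the interrupt ring size a power of two and index it through ptr_mask rather than an explicit end-of-ring check. A small stand-alone sketch (plain user-space C with made-up sizes, not kernel code) of why ptr_mask = ring_size - 1 turns both the normal advance and the overflow catch-up into a single AND:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ring_size = 64 * 1024;        /* bytes; power of two, as r600_ih_ring_init enforces */
	uint32_t ptr_mask  = ring_size - 1;
	uint32_t rptr      = ring_size - 16;   /* read pointer sitting on the last 16-byte vector */

	/* normal advance: replaces the old "if (rptr == last_entry) rptr = 0; else rptr += 16;" */
	rptr += 16;
	rptr &= ptr_mask;
	printf("wrapped rptr  = %u\n", rptr);   /* 0 */

	/* overflow catch-up: restart at the oldest vector not yet overwritten (wptr + 16),
	 * as r600_get_ih_wptr() now does; the mask also strips any status bits in wptr */
	uint32_t wptr = 0x12345;
	rptr = (wptr + 16) & ptr_mask;
	printf("catch-up rptr = %u\n", rptr);
	return 0;
}
```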
@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
 typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
 static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
 
+struct r600_cs_track {
+	u32	cb_color0_base_last;
+};
+
 /**
  * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
  * @parser:	parser structure holding parsing context.
@@ -176,6 +180,28 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
 	return 0;
 }
 
+/**
+ * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
+ * @parser:	parser structure holding parsing context.
+ *
+ * Check next packet is relocation packet3, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet p3reloc;
+	int r;
+
+	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r) {
+		return 0;
+	}
+	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+		return 0;
+	}
+	return 1;
+}
+
 /**
  * r600_cs_packet_next_vline() - parse userspace VLINE packet
  * @parser:	parser structure holding parsing context.
@@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 				struct radeon_cs_packet *pkt)
 {
 	struct radeon_cs_reloc *reloc;
+	struct r600_cs_track *track;
 	volatile u32 *ib;
 	unsigned idx;
 	unsigned i;
@@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 	int r;
 	u32 idx_value;
 
+	track = (struct r600_cs_track *)p->track;
 	ib = p->ib->ptr;
 	idx = pkt->idx + 1;
 	idx_value = radeon_get_ib_value(p, idx);
@@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		for (i = 0; i < pkt->count; i++) {
 			reg = start_reg + (4 * i);
 			switch (reg) {
+			/* This register were added late, there is userspace
+			 * which does provide relocation for those but set
+			 * 0 offset. In order to avoid breaking old userspace
+			 * we detect this and set address to point to last
+			 * CB_COLOR0_BASE, note that if userspace doesn't set
+			 * CB_COLOR0_BASE before this register we will report
+			 * error. Old userspace always set CB_COLOR0_BASE
+			 * before any of this.
+			 */
+			case R_0280E0_CB_COLOR0_FRAG:
+			case R_0280E4_CB_COLOR1_FRAG:
+			case R_0280E8_CB_COLOR2_FRAG:
+			case R_0280EC_CB_COLOR3_FRAG:
+			case R_0280F0_CB_COLOR4_FRAG:
+			case R_0280F4_CB_COLOR5_FRAG:
+			case R_0280F8_CB_COLOR6_FRAG:
+			case R_0280FC_CB_COLOR7_FRAG:
+			case R_0280C0_CB_COLOR0_TILE:
+			case R_0280C4_CB_COLOR1_TILE:
+			case R_0280C8_CB_COLOR2_TILE:
+			case R_0280CC_CB_COLOR3_TILE:
+			case R_0280D0_CB_COLOR4_TILE:
+			case R_0280D4_CB_COLOR5_TILE:
+			case R_0280D8_CB_COLOR6_TILE:
+			case R_0280DC_CB_COLOR7_TILE:
+				if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+					if (!track->cb_color0_base_last) {
+						dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+						return -EINVAL;
+					}
+					ib[idx+1+i] = track->cb_color0_base_last;
+					printk_once(KERN_WARNING "You have old & broken userspace "
+						"please consider updating mesa & xf86-video-ati\n");
+				} else {
+					r = r600_cs_packet_next_reloc(p, &reloc);
+					if (r) {
+						dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+						return -EINVAL;
+					}
+					ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+				}
+				break;
 			case DB_DEPTH_BASE:
 			case DB_HTILE_DATA_BASE:
 			case CB_COLOR0_BASE:
+				r = r600_cs_packet_next_reloc(p, &reloc);
+				if (r) {
+					DRM_ERROR("bad SET_CONTEXT_REG "
+							"0x%04X\n", reg);
+					return -EINVAL;
+				}
+				ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+				track->cb_color0_base_last = ib[idx+1+i];
+				break;
 			case CB_COLOR1_BASE:
 			case CB_COLOR2_BASE:
 			case CB_COLOR3_BASE:
@@ -678,8 +757,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 int r600_cs_parse(struct radeon_cs_parser *p)
 {
 	struct radeon_cs_packet pkt;
+	struct r600_cs_track *track;
 	int r;
 
+	track = kzalloc(sizeof(*track), GFP_KERNEL);
+	p->track = track;
 	do {
 		r = r600_cs_packet_parse(p, &pkt, p->idx);
 		if (r) {
@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 	/* initialize parser */
 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
 	parser.filp = filp;
+	parser.dev = &dev->pdev->dev;
 	parser.rdev = NULL;
 	parser.family = family;
 	parser.ib = &fake_ib;
...
@@ -882,4 +882,29 @@
 #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
 
 #define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+
+#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
+#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0280E0_BASE_256B 0x00000000
+#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
+#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
+#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
+#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
+#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
+#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
+#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
+#define R_0280C0_CB_COLOR0_TILE 0x0280C0
+#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0280C0_BASE_256B 0x00000000
+#define R_0280C4_CB_COLOR1_TILE 0x0280C4
+#define R_0280C8_CB_COLOR2_TILE 0x0280C8
+#define R_0280CC_CB_COLOR3_TILE 0x0280CC
+#define R_0280D0_CB_COLOR4_TILE 0x0280D0
+#define R_0280D4_CB_COLOR5_TILE 0x0280D4
+#define R_0280D8_CB_COLOR6_TILE 0x0280D8
+#define R_0280DC_CB_COLOR7_TILE 0x0280DC
+
 #endif
@@ -410,7 +410,6 @@ struct r600_ih {
 	unsigned	wptr_old;
 	unsigned	ring_size;
 	uint64_t	gpu_addr;
-	uint32_t	align_mask;
 	uint32_t	ptr_mask;
 	spinlock_t	lock;
 	bool		enabled;
@@ -465,6 +464,7 @@ struct radeon_cs_chunk {
 };
 
 struct radeon_cs_parser {
+	struct device		*dev;
 	struct radeon_device	*rdev;
 	struct drm_file		*filp;
 	/* chunks */
@@ -847,7 +847,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
 static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
 {
-	if (reg < 0x10000)
+	if (reg < rdev->rmmio_size)
 		return readl(((void __iomem *)rdev->rmmio) + reg);
 	else {
 		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -857,7 +857,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
 static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
-	if (reg < 0x10000)
+	if (reg < rdev->rmmio_size)
 		writel(v, ((void __iomem *)rdev->rmmio) + reg);
 	else {
 		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -1162,7 +1162,8 @@ extern int r600_irq_init(struct radeon_device *rdev);
 extern void r600_irq_fini(struct radeon_device *rdev);
 extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
 extern int r600_irq_set(struct radeon_device *rdev);
+extern void r600_irq_suspend(struct radeon_device *rdev);
+/* r600 audio */
 extern int r600_audio_init(struct radeon_device *rdev);
 extern int r600_audio_tmds_index(struct drm_encoder *encoder);
 extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
...
@@ -56,7 +56,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
 	else if (post_div == 3)
 		sclk >>= 2;
 	else if (post_div == 4)
-		sclk >>= 4;
+		sclk >>= 3;
 
 	return sclk;
 }
@@ -86,7 +86,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
 	else if (post_div == 3)
 		mclk >>= 2;
 	else if (post_div == 4)
-		mclk >>= 4;
+		mclk >>= 3;
 
 	return mclk;
 }
...
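Both hunks fix the same decode: a post-divider field value of 4 means divide by 8, so the shift is 3, not 4. A tiny stand-alone illustration of the corrected mapping (made-up VCO frequency, not driver code):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vco_khz = 400000;   /* hypothetical PLL output */
	uint32_t post_div = 4;       /* divider field read back from the PLL */
	uint32_t sclk = vco_khz;

	/* decode used by the fixed code: field 2 -> /2, 3 -> /4, 4 -> /8 */
	if (post_div == 2)
		sclk >>= 1;
	else if (post_div == 3)
		sclk >>= 2;
	else if (post_div == 4)
		sclk >>= 3;

	printf("engine clock = %u kHz\n", sclk);   /* 50000; the old ">>= 4" reported 25000 */
	return 0;
}
```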
@@ -231,6 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
 	parser.filp = filp;
 	parser.rdev = rdev;
+	parser.dev = rdev->dev;
 	r = radeon_cs_parser_init(&parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
...
@@ -411,11 +411,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
 			uint32_t *fb_div_p,
 			uint32_t *frac_fb_div_p,
 			uint32_t *ref_div_p,
-			uint32_t *post_div_p,
-			int flags)
+			uint32_t *post_div_p)
 {
 	uint32_t min_ref_div = pll->min_ref_div;
 	uint32_t max_ref_div = pll->max_ref_div;
+	uint32_t min_post_div = pll->min_post_div;
+	uint32_t max_post_div = pll->max_post_div;
 	uint32_t min_fractional_feed_div = 0;
 	uint32_t max_fractional_feed_div = 0;
 	uint32_t best_vco = pll->best_vco;
@@ -431,7 +432,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
 	DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
 	freq = freq * 1000;
 
-	if (flags & RADEON_PLL_USE_REF_DIV)
+	if (pll->flags & RADEON_PLL_USE_REF_DIV)
 		min_ref_div = max_ref_div = pll->reference_div;
 	else {
 		while (min_ref_div < max_ref_div-1) {
@@ -446,19 +447,22 @@ void radeon_compute_pll(struct radeon_pll *pll,
 		}
 	}
 
-	if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+	if (pll->flags & RADEON_PLL_USE_POST_DIV)
+		min_post_div = max_post_div = pll->post_div;
+
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
 		min_fractional_feed_div = pll->min_frac_feedback_div;
 		max_fractional_feed_div = pll->max_frac_feedback_div;
 	}
 
-	for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
+	for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
 		uint32_t ref_div;
 
-		if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
 			continue;
 
 		/* legacy radeons only have a few post_divs */
-		if (flags & RADEON_PLL_LEGACY) {
+		if (pll->flags & RADEON_PLL_LEGACY) {
 			if ((post_div == 5) ||
 			    (post_div == 7) ||
 			    (post_div == 9) ||
@@ -505,7 +509,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
 			tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
 			current_freq = radeon_div(tmp, ref_div * post_div);
 
-			if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
+			if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
 				error = freq - current_freq;
 				error = error < 0 ? 0xffffffff : error;
 			} else
@@ -532,12 +536,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
 					best_freq = current_freq;
 					best_error = error;
 					best_vco_diff = vco_diff;
-				} else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
-					   ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
-					   ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
-					   ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
-					   ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
-					   ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
+				} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
+					   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
+					   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
+					   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
+					   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
+					   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
 					best_post_div = post_div;
 					best_ref_div = ref_div;
 					best_feedback_div = feedback_div;
@@ -573,8 +577,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
 			      uint32_t *fb_div_p,
 			      uint32_t *frac_fb_div_p,
 			      uint32_t *ref_div_p,
-			      uint32_t *post_div_p,
-			      int flags)
+			      uint32_t *post_div_p)
 {
 	fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
 	fixed20_12 pll_out_max, pll_out_min;
@@ -715,7 +718,11 @@ radeon_user_framebuffer_create(struct drm_device *dev,
 	struct drm_gem_object *obj;
 
 	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+	if (obj == NULL) {
+		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+			"can't create framebuffer\n", mode_cmd->handle);
+		return NULL;
+	}
 	return radeon_framebuffer_create(dev, mode_cmd, obj);
 }
...
@@ -339,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
 	}
 }
 
-/* properly set crtc bpp when using atombios */
-void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	int format;
-	uint32_t crtc_gen_cntl;
-	uint32_t disp_merge_cntl;
-	uint32_t crtc_pitch;
-
-	switch (crtc->fb->bits_per_pixel) {
-	case 8:
-		format = 2;
-		break;
-	case 15: /* 555 */
-		format = 3;
-		break;
-	case 16: /* 565 */
-		format = 4;
-		break;
-	case 24: /* RGB */
-		format = 5;
-		break;
-	case 32: /* xRGB */
-		format = 6;
-		break;
-	default:
-		return;
-	}
-
-	crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
-		       ((crtc->fb->bits_per_pixel * 8) - 1)) /
-		      (crtc->fb->bits_per_pixel * 8));
-	crtc_pitch |= crtc_pitch << 16;
-
-	WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
-
-	switch (radeon_crtc->crtc_id) {
-	case 0:
-		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
-		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
-		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
-
-		crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
-		crtc_gen_cntl |= (format << 8);
-		crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
-		WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
-		break;
-	case 1:
-		disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
-		disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
-		WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
-
-		crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
-		crtc_gen_cntl |= (format << 8);
-		WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
-		WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
-		WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
-		break;
-	}
-}
-
 int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 			 struct drm_framebuffer *old_fb)
 {
@@ -755,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	uint32_t post_divider = 0;
 	uint32_t freq = 0;
 	uint8_t pll_gain;
-	int pll_flags = RADEON_PLL_LEGACY;
 	bool use_bios_divs = false;
 	/* PLL registers */
 	uint32_t pll_ref_div = 0;
@@ -789,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	else
 		pll = &rdev->clock.p1pll;
 
+	pll->flags = RADEON_PLL_LEGACY;
+
 	if (mode->clock > 200000) /* range limits??? */
-		pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+		pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
 	else
-		pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+		pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->crtc == crtc) {
@@ -804,7 +742,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 			}
 
 			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
-				pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+				pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
 			if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
 				if (!rdev->is_atom_bios) {
 					struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -819,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 						}
 					}
 				}
-				pll_flags |= RADEON_PLL_USE_REF_DIV;
+				pll->flags |= RADEON_PLL_USE_REF_DIV;
 			}
 		}
 	}
@@ -829,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	if (!use_bios_divs) {
 		radeon_compute_pll(pll, mode->clock,
 				   &freq, &feedback_div, &frac_fb_div,
-				   &reference_div, &post_divider,
-				   pll_flags);
+				   &reference_div, &post_divider);
 
 		for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
 			if (post_div->divider == post_divider)
...
@@ -125,16 +125,24 @@ struct radeon_tmds_pll {
 #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
 #define RADEON_PLL_USE_FRAC_FB_DIV      (1 << 10)
 #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
+#define RADEON_PLL_USE_POST_DIV         (1 << 12)
 
 struct radeon_pll {
-	uint16_t reference_freq;
-	uint16_t reference_div;
+	/* reference frequency */
+	uint32_t reference_freq;
+
+	/* fixed dividers */
+	uint32_t reference_div;
+	uint32_t post_div;
+
+	/* pll in/out limits */
 	uint32_t pll_in_min;
 	uint32_t pll_in_max;
 	uint32_t pll_out_min;
 	uint32_t pll_out_max;
-	uint16_t xclk;
 
+	uint32_t best_vco;
+
+	/* divider limits */
 	uint32_t min_ref_div;
 	uint32_t max_ref_div;
 	uint32_t min_post_div;
@@ -143,7 +151,12 @@ struct radeon_pll {
 	uint32_t max_feedback_div;
 	uint32_t min_frac_feedback_div;
 	uint32_t max_frac_feedback_div;
-	uint32_t best_vco;
+
+	/* flags for the current clock */
+	uint32_t flags;
+
+	/* pll id */
+	uint32_t id;
 };
 
 struct radeon_i2c_chan {
@@ -417,8 +430,7 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
 			       uint32_t *fb_div_p,
 			       uint32_t *frac_fb_div_p,
 			       uint32_t *ref_div_p,
-			       uint32_t *post_div_p,
-			       int flags);
+			       uint32_t *post_div_p);
 
 extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
 				     uint64_t freq,
@@ -426,8 +438,7 @@ extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
 				     uint32_t *fb_div_p,
 				     uint32_t *frac_fb_div_p,
 				     uint32_t *ref_div_p,
-				     uint32_t *post_div_p,
-				     int flags);
+				     uint32_t *post_div_p);
 
 extern void radeon_setup_encoder_clones(struct drm_device *dev);
@@ -453,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
 extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 				struct drm_framebuffer *old_fb);
-extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
 extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
 				  struct drm_file *file_priv,
...
@@ -91,6 +91,8 @@ r200 0x3294
 0x22b8 SE_TCL_TEX_CYL_WRAP_CTL
 0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
 0x22c4 SE_TCL_POINT_SPRITE_CNTL
+0x22d0 SE_PVS_CNTL
+0x22d4 SE_PVS_CONST_CNTL
 0x2648 RE_POINTSIZE
 0x26c0 RE_TOP_LEFT
 0x26c4 RE_MISC
...
@@ -968,6 +968,7 @@ int rv770_suspend(struct radeon_device *rdev)
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
 	rdev->cp.ready = false;
+	r600_irq_suspend(rdev);
 	r600_wb_disable(rdev);
 	rv770_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
@@ -1074,13 +1075,14 @@ int rv770_init(struct radeon_device *rdev)
 	if (rdev->accel_working) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-			rdev->accel_working = false;
-		}
-		r = r600_ib_test(rdev);
-		if (r) {
-			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
 			rdev->accel_working = false;
+		} else {
+			r = r600_ib_test(rdev);
+			if (r) {
+				dev_err(rdev->dev, "IB test failed (%d).\n", r);
+				rdev->accel_working = false;
+			}
 		}
 	}
 	return 0;
...