Commit b72a8925 authored by Daniel Vetter, committed by Dave Airlie

drm/radeon: s/drm_order/order_base_2/

Last driver and pretty obviously a major user of this little function.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@gmail.com>
parent 0e267944
...@@ -2535,8 +2535,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev) ...@@ -2535,8 +2535,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */ /* ring 0 - compute and gfx */
/* Set ring buffer size */ /* Set ring buffer size */
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
rb_bufsz = drm_order(ring->ring_size / 8); rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT; tmp |= BUF_SWAP_32BIT;
#endif #endif
...@@ -2915,7 +2915,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) ...@@ -2915,7 +2915,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
tmp = RREG32(CP_HPD_EOP_CONTROL); tmp = RREG32(CP_HPD_EOP_CONTROL);
tmp &= ~EOP_SIZE_MASK; tmp &= ~EOP_SIZE_MASK;
tmp |= drm_order(MEC_HPD_SIZE / 8); tmp |= order_base_2(MEC_HPD_SIZE / 8);
WREG32(CP_HPD_EOP_CONTROL, tmp); WREG32(CP_HPD_EOP_CONTROL, tmp);
} }
cik_srbm_select(rdev, 0, 0, 0, 0); cik_srbm_select(rdev, 0, 0, 0, 0);
...@@ -3030,9 +3030,9 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) ...@@ -3030,9 +3030,9 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK); ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
mqd->queue_state.cp_hqd_pq_control |= mqd->queue_state.cp_hqd_pq_control |=
drm_order(rdev->ring[idx].ring_size / 8); order_base_2(rdev->ring[idx].ring_size / 8);
mqd->queue_state.cp_hqd_pq_control |= mqd->queue_state.cp_hqd_pq_control |=
(drm_order(RADEON_GPU_PAGE_SIZE/8) << 8); (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT; mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
#endif #endif
...@@ -3375,7 +3375,7 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev) ...@@ -3375,7 +3375,7 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
/* Set ring buffer size in dwords */ /* Set ring buffer size in dwords */
rb_bufsz = drm_order(ring->ring_size / 4); rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1; rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE; rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
...@@ -5030,7 +5030,7 @@ static int cik_irq_init(struct radeon_device *rdev) ...@@ -5030,7 +5030,7 @@ static int cik_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl); WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
rb_bufsz = drm_order(rdev->ih.ring_size / 4); rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR | IH_WPTR_OVERFLOW_CLEAR |
......
...@@ -2881,8 +2881,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev) ...@@ -2881,8 +2881,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
RREG32(GRBM_SOFT_RESET); RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */ /* Set ring buffer size */
rb_bufsz = drm_order(ring->ring_size / 8); rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT; tmp |= BUF_SWAP_32BIT;
#endif #endif
......
...@@ -1560,8 +1560,8 @@ static int cayman_cp_resume(struct radeon_device *rdev) ...@@ -1560,8 +1560,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */ /* Set ring buffer size */
ring = &rdev->ring[ridx[i]]; ring = &rdev->ring[ridx[i]];
rb_cntl = drm_order(ring->ring_size / 8); rb_cntl = order_base_2(ring->ring_size / 8);
rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8; rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
rb_cntl |= BUF_SWAP_32BIT; rb_cntl |= BUF_SWAP_32BIT;
#endif #endif
...@@ -1720,7 +1720,7 @@ int cayman_dma_resume(struct radeon_device *rdev) ...@@ -1720,7 +1720,7 @@ int cayman_dma_resume(struct radeon_device *rdev)
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
/* Set ring buffer size in dwords */ /* Set ring buffer size in dwords */
rb_bufsz = drm_order(ring->ring_size / 4); rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1; rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
......
...@@ -1097,7 +1097,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) ...@@ -1097,7 +1097,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
} }
/* Align ring size */ /* Align ring size */
rb_bufsz = drm_order(ring_size / 8); rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4; ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev); r100_cp_load_microcode(rdev);
r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
......
...@@ -2413,8 +2413,8 @@ int r600_cp_resume(struct radeon_device *rdev) ...@@ -2413,8 +2413,8 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0); WREG32(GRBM_SOFT_RESET, 0);
/* Set ring buffer size */ /* Set ring buffer size */
rb_bufsz = drm_order(ring->ring_size / 8); rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT; tmp |= BUF_SWAP_32BIT;
#endif #endif
...@@ -2467,7 +2467,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign ...@@ -2467,7 +2467,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
int r; int r;
/* Align ring size */ /* Align ring size */
rb_bufsz = drm_order(ring_size / 8); rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4; ring_size = (1 << (rb_bufsz + 1)) * 4;
ring->ring_size = ring_size; ring->ring_size = ring_size;
ring->align_mask = 16 - 1; ring->align_mask = 16 - 1;
...@@ -2547,7 +2547,7 @@ int r600_dma_resume(struct radeon_device *rdev) ...@@ -2547,7 +2547,7 @@ int r600_dma_resume(struct radeon_device *rdev)
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
/* Set ring buffer size in dwords */ /* Set ring buffer size in dwords */
rb_bufsz = drm_order(ring->ring_size / 4); rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1; rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
...@@ -2656,7 +2656,7 @@ int r600_uvd_rbc_start(struct radeon_device *rdev) ...@@ -2656,7 +2656,7 @@ int r600_uvd_rbc_start(struct radeon_device *rdev)
WREG32(UVD_RBC_RB_BASE, ring->gpu_addr); WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
/* Set ring buffer size */ /* Set ring buffer size */
rb_bufsz = drm_order(ring->ring_size); rb_bufsz = order_base_2(ring->ring_size);
rb_bufsz = (0x1 << 8) | rb_bufsz; rb_bufsz = (0x1 << 8) | rb_bufsz;
WREG32(UVD_RBC_RB_CNTL, rb_bufsz); WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
...@@ -3812,7 +3812,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) ...@@ -3812,7 +3812,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
u32 rb_bufsz; u32 rb_bufsz;
/* Align ring size */ /* Align ring size */
rb_bufsz = drm_order(ring_size / 4); rb_bufsz = order_base_2(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4; ring_size = (1 << rb_bufsz) * 4;
rdev->ih.ring_size = ring_size; rdev->ih.ring_size = ring_size;
rdev->ih.ptr_mask = rdev->ih.ring_size - 1; rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
...@@ -4049,7 +4049,7 @@ int r600_irq_init(struct radeon_device *rdev) ...@@ -4049,7 +4049,7 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl); WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
rb_bufsz = drm_order(rdev->ih.ring_size / 4); rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR | IH_WPTR_OVERFLOW_CLEAR |
......
...@@ -2200,13 +2200,13 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, ...@@ -2200,13 +2200,13 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32)); + init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size; dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8); dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8);
dev_priv->ring.fetch_size = /* init->fetch_size */ 32; dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16); dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
......
...@@ -1444,13 +1444,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, ...@@ -1444,13 +1444,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32)); + init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size; dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8); dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8);
dev_priv->ring.fetch_size = /* init->fetch_size */ 32; dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
......
...@@ -3383,8 +3383,8 @@ static int si_cp_resume(struct radeon_device *rdev) ...@@ -3383,8 +3383,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */ /* ring 0 - compute and gfx */
/* Set ring buffer size */ /* Set ring buffer size */
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
rb_bufsz = drm_order(ring->ring_size / 8); rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT; tmp |= BUF_SWAP_32BIT;
#endif #endif
...@@ -3416,8 +3416,8 @@ static int si_cp_resume(struct radeon_device *rdev) ...@@ -3416,8 +3416,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring1 - compute only */ /* ring1 - compute only */
/* Set ring buffer size */ /* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
rb_bufsz = drm_order(ring->ring_size / 8); rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT; tmp |= BUF_SWAP_32BIT;
#endif #endif
...@@ -3442,8 +3442,8 @@ static int si_cp_resume(struct radeon_device *rdev) ...@@ -3442,8 +3442,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring2 - compute only */ /* ring2 - compute only */
/* Set ring buffer size */ /* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
rb_bufsz = drm_order(ring->ring_size / 8); rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT; tmp |= BUF_SWAP_32BIT;
#endif #endif
...@@ -5651,7 +5651,7 @@ static int si_irq_init(struct radeon_device *rdev) ...@@ -5651,7 +5651,7 @@ static int si_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl); WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
rb_bufsz = drm_order(rdev->ih.ring_size / 4); rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR | IH_WPTR_OVERFLOW_CLEAR |
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment