Commit 7c014334 authored by Dave Airlie

Merge branch 'next' of git://people.freedesktop.org/~deathsimple/linux into drm-core-next

* 'next' of git://people.freedesktop.org/~deathsimple/linux:
  drm/radeon: replace cs_mutex with vm_mutex v3
  drm/radeon: replace pflip and sw_int counters with atomics
  drm/radeon: apply Murphy's law to the kms irq code v3
  drm/radeon: fix & improve ih ring handling v3
  drm/radeon: remove some unneeded structure members
  drm/radeon: replace vmram_mutex with mclk_lock v2
  drm/radeon: rework ring syncing code
  drm/radeon: add infrastructure for advanced ring synchronization v2
  drm/radeon: remove radeon_fence_create
parents 4ef7fe7c 36ff39c4
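The "fix & improve ih ring handling" and "replace pflip and sw_int counters with atomics" patches below drop the per-device IH spinlock and cached write pointer in favour of a single atomic flag: whoever wins the exchange drains the ring, releases the flag, and then re-reads the hardware write pointer so no entry is lost. A minimal illustrative sketch of that pattern follows; it is not code from this merge, and the example_* helpers are hypothetical stand-ins for the hardware accessors.

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/types.h>

struct example_ih {
	atomic_t lock;     /* 0 = idle, 1 = an irq handler is draining the ring */
	u32 rptr;          /* software read pointer, in bytes */
	u32 ptr_mask;      /* ring size in bytes, minus one */
};

/* hypothetical hardware accessors, not part of the radeon code */
u32 example_get_wptr(struct example_ih *ih);
void example_write_rptr(struct example_ih *ih, u32 rptr);
void example_handle_entry(struct example_ih *ih, u32 rptr);

irqreturn_t example_irq_process(struct example_ih *ih)
{
	u32 wptr = example_get_wptr(ih);
	u32 rptr;

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&ih->lock, 1))
		return IRQ_NONE;

	rptr = ih->rptr;
	/* order reading of wptr vs. reading of ring data */
	rmb();

	while (rptr != wptr) {
		example_handle_entry(ih, rptr);   /* decode one 16-byte IH entry */
		rptr = (rptr + 16) & ih->ptr_mask;
	}

	ih->rptr = rptr;
	example_write_rptr(ih, rptr);
	atomic_set(&ih->lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = example_get_wptr(ih);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}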
@@ -428,6 +428,7 @@ void evergreen_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned enabled = 0;
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
@@ -436,73 +437,64 @@ void evergreen_hpd_init(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
- rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp);
- rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp);
- rdev->irq.hpd[2] = true;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp);
- rdev->irq.hpd[3] = true;
break;
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp);
- rdev->irq.hpd[4] = true;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp);
- rdev->irq.hpd[5] = true;
break;
default:
break;
}
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+ enabled |= 1 << radeon_connector->hpd.hpd;
}
- if (rdev->irq.installed)
- evergreen_irq_set(rdev);
+ radeon_irq_kms_enable_hpd(rdev, enabled);
}
void evergreen_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned disabled = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0);
- rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0);
- rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0);
- rdev->irq.hpd[2] = false;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0);
- rdev->irq.hpd[3] = false;
break;
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0);
- rdev->irq.hpd[4] = false;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0);
- rdev->irq.hpd[5] = false;
break;
default:
break;
}
+ disabled |= 1 << radeon_connector->hpd.hpd;
}
+ radeon_irq_kms_disable_hpd(rdev, disabled);
}
/* watermark setup */
@@ -1371,7 +1363,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -2348,20 +2340,20 @@ int evergreen_irq_set(struct radeon_device *rdev)
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
} else {
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
@@ -2369,32 +2361,32 @@ int evergreen_irq_set(struct radeon_device *rdev)
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
DRM_DEBUG("evergreen_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[2] ||
- rdev->irq.pflip[2]) {
+ atomic_read(&rdev->irq.pflip[2])) {
DRM_DEBUG("evergreen_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[3] ||
- rdev->irq.pflip[3]) {
+ atomic_read(&rdev->irq.pflip[3])) {
DRM_DEBUG("evergreen_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[4] ||
- rdev->irq.pflip[4]) {
+ atomic_read(&rdev->irq.pflip[4])) {
DRM_DEBUG("evergreen_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[5] ||
- rdev->irq.pflip[5]) {
+ atomic_read(&rdev->irq.pflip[5])) {
DRM_DEBUG("evergreen_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
@@ -2676,7 +2668,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 rptr;
u32 src_id, src_data;
u32 ring_index;
- unsigned long flags;
bool queue_hotplug = false;
bool queue_hdmi = false;
@@ -2684,22 +2675,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
return IRQ_NONE;
wptr = evergreen_get_ih_wptr(rdev);
+ restart_ih:
+ /* is somebody else already processing irqs? */
+ if (atomic_xchg(&rdev->ih.lock, 1))
+ return IRQ_NONE;
rptr = rdev->ih.rptr;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
- spin_lock_irqsave(&rdev->ih.lock, flags);
- if (rptr == wptr) {
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
- return IRQ_NONE;
- }
- restart_ih:
/* Order reading of wptr vs. reading of IH ring data */
rmb();
/* display interrupts */
evergreen_irq_ack(rdev);
- rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
@@ -2716,7 +2706,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
@@ -2742,7 +2732,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
@@ -2768,7 +2758,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[2])
+ if (atomic_read(&rdev->irq.pflip[2]))
radeon_crtc_handle_flip(rdev, 2);
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
@@ -2794,7 +2784,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[3])
+ if (atomic_read(&rdev->irq.pflip[3]))
radeon_crtc_handle_flip(rdev, 3);
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
@@ -2820,7 +2810,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[4])
+ if (atomic_read(&rdev->irq.pflip[4]))
radeon_crtc_handle_flip(rdev, 4);
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
@@ -2846,7 +2836,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[5])
+ if (atomic_read(&rdev->irq.pflip[5]))
radeon_crtc_handle_flip(rdev, 5);
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
@@ -2986,7 +2976,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
default:
@@ -2998,17 +2987,19 @@ int evergreen_irq_process(struct radeon_device *rdev)
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
- /* make sure wptr hasn't changed while processing */
- wptr = evergreen_get_ih_wptr(rdev);
- if (wptr != rdev->ih.wptr)
- goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ atomic_set(&rdev->ih.lock, 0);
+ /* make sure wptr hasn't changed while processing */
+ wptr = evergreen_get_ih_wptr(rdev);
+ if (wptr != rptr)
+ goto restart_ih;
return IRQ_HANDLED;
}
...
@@ -622,7 +622,8 @@ int evergreen_blit_init(struct radeon_device *rdev)
rdev->r600_blit.primitives.draw_auto = draw_auto;
rdev->r600_blit.primitives.set_default_state = set_default_state;
- rdev->r600_blit.ring_size_common = 55; /* shaders + def state */
+ rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
+ rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
...
@@ -850,7 +850,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
...
@@ -567,43 +567,27 @@ void r100_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned enable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- switch (radeon_connector->hpd.hpd) {
- case RADEON_HPD_1:
- rdev->irq.hpd[0] = true;
- break;
- case RADEON_HPD_2:
- rdev->irq.hpd[1] = true;
- break;
- default:
- break;
- }
+ enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
- if (rdev->irq.installed)
- r100_irq_set(rdev);
+ radeon_irq_kms_enable_hpd(rdev, enable);
}
void r100_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned disable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- switch (radeon_connector->hpd.hpd) {
- case RADEON_HPD_1:
- rdev->irq.hpd[0] = false;
- break;
- case RADEON_HPD_2:
- rdev->irq.hpd[1] = false;
- break;
- default:
- break;
- }
+ disable |= 1 << radeon_connector->hpd.hpd;
}
+ radeon_irq_kms_disable_hpd(rdev, disable);
}
/*
@@ -705,18 +689,18 @@ int r100_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
tmp |= RADEON_SW_INT_ENABLE;
}
if (rdev->irq.gui_idle) {
tmp |= RADEON_GUI_IDLE_MASK;
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
tmp |= RADEON_CRTC2_VBLANK_MASK;
}
if (rdev->irq.hpd[0]) {
@@ -782,7 +766,6 @@ int r100_irq_process(struct radeon_device *rdev)
/* gui idle interrupt */
if (status & RADEON_GUI_IDLE_STAT) {
rdev->irq.gui_idle_acked = true;
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
}
/* Vertical blank interrupts */
@@ -792,7 +775,7 @@ int r100_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
@@ -801,7 +784,7 @@ int r100_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
}
if (status & RADEON_FP_DETECT_STAT) {
@@ -883,7 +866,7 @@ int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence)
+ struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t cur_pages;
@@ -947,7 +930,7 @@ int r100_copy_blit(struct radeon_device *rdev,
RADEON_WAIT_HOST_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev, ring);
return r;
...
@@ -85,7 +85,7 @@ int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence)
+ struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t size;
@@ -120,7 +120,7 @@ int r200_copy_dma(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev, ring);
return r;
...
@@ -709,6 +709,7 @@ void r600_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned enable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -729,28 +730,22 @@ void r600_hpd_init(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
- rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp);
- rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp);
- rdev->irq.hpd[2] = true;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp);
- rdev->irq.hpd[3] = true;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp);
- rdev->irq.hpd[4] = true;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp);
- rdev->irq.hpd[5] = true;
break;
default:
break;
@@ -759,85 +754,73 @@ void r600_hpd_init(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
- rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
- rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
- rdev->irq.hpd[2] = true;
break;
default:
break;
}
}
+ enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
- if (rdev->irq.installed)
- r600_irq_set(rdev);
+ radeon_irq_kms_enable_hpd(rdev, enable);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned disable = 0;
- if (ASIC_IS_DCE3(rdev)) {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (ASIC_IS_DCE3(rdev)) {
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0);
- rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0);
- rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0);
- rdev->irq.hpd[2] = false;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0);
- rdev->irq.hpd[3] = false;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0);
- rdev->irq.hpd[4] = false;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0);
- rdev->irq.hpd[5] = false;
break;
default:
break;
}
- }
} else {
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
- rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
- rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
- rdev->irq.hpd[2] = false;
break;
default:
break;
}
}
+ disable |= 1 << radeon_connector->hpd.hpd;
}
+ radeon_irq_kms_disable_hpd(rdev, disable);
}
/*
@@ -2309,17 +2292,18 @@ int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence)
+ struct radeon_fence **fence)
{
+ struct radeon_semaphore *sem = NULL;
struct radeon_sa_bo *vb = NULL;
int r;
- r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
if (r) {
return r;
}
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
- r600_blit_done_copy(rdev, fence, vb);
+ r600_blit_done_copy(rdev, fence, vb, sem);
return 0;
}
@@ -2607,7 +2591,7 @@ void r600_fini(struct radeon_device *rdev)
*/
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
/* FIXME: implement */
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -2857,7 +2841,6 @@ void r600_disable_interrupts(struct radeon_device *rdev)
WREG32(IH_RB_RPTR, 0);
WREG32(IH_RB_WPTR, 0);
rdev->ih.enabled = false;
- rdev->ih.wptr = 0;
rdev->ih.rptr = 0;
}
@@ -3042,18 +3025,18 @@ int r600_irq_set(struct radeon_device *rdev)
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
mode_int |= D1MODE_VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
DRM_DEBUG("r600_irq_set: vblank 1\n");
mode_int |= D2MODE_VBLANK_INT_MASK;
}
@@ -3309,7 +3292,6 @@ int r600_irq_process(struct radeon_device *rdev)
u32 rptr;
u32 src_id, src_data;
u32 ring_index;
- unsigned long flags;
bool queue_hotplug = false;
bool queue_hdmi = false;
@@ -3321,24 +3303,21 @@ int r600_irq_process(struct radeon_device *rdev)
RREG32(IH_RB_WPTR);
wptr = r600_get_ih_wptr(rdev);
- rptr = rdev->ih.rptr;
- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
- spin_lock_irqsave(&rdev->ih.lock, flags);
+ restart_ih:
+ /* is somebody else already processing irqs? */
- if (rptr == wptr) {
+ if (atomic_xchg(&rdev->ih.lock, 1))
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_NONE;
- }
- restart_ih:
+ rptr = rdev->ih.rptr;
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
/* display interrupts */
r600_irq_ack(rdev);
- rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
@@ -3355,7 +3334,7 @@ int r600_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
@@ -3381,7 +3360,7 @@ int r600_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
@@ -3480,7 +3459,6 @@ int r600_irq_process(struct radeon_device *rdev)
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
default:
@@ -3492,17 +3470,19 @@ int r600_irq_process(struct radeon_device *rdev)
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
- /* make sure wptr hasn't changed while processing */
- wptr = r600_get_ih_wptr(rdev);
- if (wptr != rdev->ih.wptr)
- goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ atomic_set(&rdev->ih.lock, 0);
+ /* make sure wptr hasn't changed while processing */
+ wptr = r600_get_ih_wptr(rdev);
+ if (wptr != rptr)
+ goto restart_ih;
return IRQ_HANDLED;
}
...
@@ -512,7 +512,8 @@ int r600_blit_init(struct radeon_device *rdev)
rdev->r600_blit.primitives.draw_auto = draw_auto;
rdev->r600_blit.primitives.set_default_state = set_default_state;
- rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
+ rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
+ rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
@@ -666,7 +667,8 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
- struct radeon_sa_bo **vb)
+ struct radeon_fence **fence, struct radeon_sa_bo **vb,
+ struct radeon_semaphore **sem)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
@@ -689,34 +691,50 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
return r;
}
+ r = radeon_semaphore_create(rdev, sem);
+ if (r) {
+ radeon_sa_bo_free(rdev, vb, NULL);
+ return r;
+ }
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
r = radeon_ring_lock(rdev, ring, ring_size);
if (r) {
radeon_sa_bo_free(rdev, vb, NULL);
+ radeon_semaphore_free(rdev, sem, NULL);
return r;
}
+ if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
+ radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
+ RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
+ } else {
+ radeon_semaphore_free(rdev, sem, NULL);
+ }
rdev->r600_blit.primitives.set_default_state(rdev);
rdev->r600_blit.primitives.set_shaders(rdev);
return 0;
}
- void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
- struct radeon_sa_bo *vb)
+ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+ struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return;
}
radeon_ring_unlock_commit(rdev, ring);
- radeon_sa_bo_free(rdev, &vb, fence);
+ radeon_sa_bo_free(rdev, &vb, *fence);
+ radeon_semaphore_free(rdev, &sem, *fence);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
...
@@ -519,8 +519,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
if (rdev->irq.installed) {
/* if irq is available use it */
- rdev->irq.afmt[dig->afmt->id] = true;
- radeon_irq_set(rdev);
+ radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
}
dig->afmt->enabled = true;
@@ -556,8 +555,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
offset, radeon_encoder->encoder_id);
/* disable irq */
- rdev->irq.afmt[dig->afmt->id] = false;
- radeon_irq_set(rdev);
+ radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
/* Older chipsets not handled by AtomBIOS */
if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
...
@@ -113,7 +113,6 @@ extern int radeon_lockup_timeout;
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
- #define RADEON_FENCE_NOTEMITED_SEQ (~0LL)
/* internal ring indices */
/* r1xx+ has gfx CP ring */
@@ -160,48 +159,6 @@ static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
#endif
bool radeon_get_bios(struct radeon_device *rdev);
- /*
-  * Mutex which allows recursive locking from the same process.
-  */
- struct radeon_mutex {
- struct mutex mutex;
- struct task_struct *owner;
- int level;
- };
- static inline void radeon_mutex_init(struct radeon_mutex *mutex)
- {
- mutex_init(&mutex->mutex);
- mutex->owner = NULL;
- mutex->level = 0;
- }
- static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
- {
- if (mutex_trylock(&mutex->mutex)) {
- /* The mutex was unlocked before, so it's ours now */
- mutex->owner = current;
- } else if (mutex->owner != current) {
- /* Another process locked the mutex, take it */
- mutex_lock(&mutex->mutex);
- mutex->owner = current;
- }
- /* Otherwise the mutex was already locked by this process */
- mutex->level++;
- }
- static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
- {
- if (--mutex->level > 0)
- return;
- mutex->owner = NULL;
- mutex_unlock(&mutex->mutex);
- }
/*
 * Dummy page
 */
@@ -258,8 +215,8 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
- /* seq is protected by ring emission lock */
- uint64_t seq;
+ /* sync_seq is protected by ring emission lock */
+ uint64_t sync_seq[RADEON_NUM_RINGS];
atomic64_t last_seq;
unsigned long last_activity;
bool initialized;
@@ -277,8 +234,7 @@ struct radeon_fence {
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
- int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
- int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
+ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
@@ -290,6 +246,27 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+ bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
+ void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
+ static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
+ struct radeon_fence *b)
+ {
+ if (!a) {
+ return b;
+ }
+ if (!b) {
+ return a;
+ }
+ BUG_ON(a->ring != b->ring);
+ if (a->seq > b->seq) {
+ return a;
+ } else {
+ return b;
+ }
+ }
/*
 * Tiling registers
@@ -451,10 +428,9 @@ void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- bool sync_to[RADEON_NUM_RINGS],
- int dst_ring);
+ int signaler, int waiter);
void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore,
+ struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
/*
@@ -598,20 +574,17 @@ union radeon_irq_stat_regs {
struct radeon_irq {
bool installed;
- bool sw_int[RADEON_NUM_RINGS];
+ spinlock_t lock;
+ atomic_t ring_int[RADEON_NUM_RINGS];
bool crtc_vblank_int[RADEON_MAX_CRTCS];
- bool pflip[RADEON_MAX_CRTCS];
+ atomic_t pflip[RADEON_MAX_CRTCS];
wait_queue_head_t vblank_queue;
bool hpd[RADEON_MAX_HPD_PINS];
bool gui_idle;
bool gui_idle_acked;
wait_queue_head_t idle_queue;
bool afmt[RADEON_MAX_AFMT_BLOCKS];
- spinlock_t sw_lock;
- int sw_refcount[RADEON_NUM_RINGS];
union radeon_irq_stat_regs stat_regs;
- spinlock_t pflip_lock[RADEON_MAX_CRTCS];
- int pflip_refcount[RADEON_MAX_CRTCS];
};
int radeon_irq_kms_init(struct radeon_device *rdev);
@@ -620,6 +593,11 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
+ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
+ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
+ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+ int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev);
/*
 * CP & rings.
@@ -630,9 +608,11 @@ struct radeon_ib {
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
+ int ring;
struct radeon_fence *fence;
unsigned vm_id;
bool is_const_ib;
+ struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_semaphore *semaphore;
};
@@ -690,6 +670,7 @@ struct radeon_vm_funcs {
};
struct radeon_vm_manager {
+ struct mutex lock;
struct list_head lru_vm;
uint32_t use_bitmap;
struct radeon_sa_manager sa_manager;
@@ -718,13 +699,10 @@ struct r600_ih {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
- unsigned rptr_offs;
- unsigned wptr;
- unsigned wptr_old;
unsigned ring_size;
uint64_t gpu_addr;
uint32_t ptr_mask;
- spinlock_t lock;
+ atomic_t lock;
bool enabled;
};
@@ -1039,11 +1017,12 @@ struct radeon_power_state {
struct radeon_pm {
struct mutex mutex;
+ /* write locked while reprogramming mclk */
+ struct rw_semaphore mclk_lock;
u32 active_crtcs;
int active_crtc_count;
int req_vblank;
bool vblank_sync;
- bool gui_idle;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
@@ -1192,20 +1171,20 @@ struct radeon_asic {
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
u32 blit_ring_index;
int (*dma)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
u32 dma_ring_index;
/* method used for bo copy */
int (*copy)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
/* ring used for bo copies */
u32 copy_ring_index;
} copy;
@@ -1512,7 +1491,6 @@ struct radeon_device {
struct radeon_gem gem;
struct radeon_pm pm;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
- struct radeon_mutex cs_mutex;
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
bool shutdown;
@@ -1534,7 +1512,6 @@ struct radeon_device {
struct work_struct audio_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
- struct mutex vram_mutex;
bool audio_enabled;
struct r600_audio audio_status; /* audio stuff */
struct notifier_block acpi_nb;
...
@@ -79,7 +79,7 @@ int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@@ -144,7 +144,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
void r200_set_safe_registers(struct radeon_device *rdev);
/*
@@ -318,7 +318,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages, struct radeon_fence *fence);
+ unsigned num_gpu_pages, struct radeon_fence **fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -363,9 +363,10 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
- struct radeon_sa_bo **vb);
- void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
- struct radeon_sa_bo *vb);
+ struct radeon_fence **fence, struct radeon_sa_bo **vb,
+ struct radeon_semaphore **sem);
+ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+ struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
unsigned num_gpu_pages,
...
@@ -45,20 +45,14 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
for (i = 0; i < n; i++) {
switch (flag) {
case RADEON_BENCHMARK_COPY_DMA:
- r = radeon_fence_create(rdev, &fence, radeon_copy_dma_ring_index(rdev));
- if (r)
- return r;
r = radeon_copy_dma(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
- fence);
+ &fence);
break;
case RADEON_BENCHMARK_COPY_BLIT:
- r = radeon_fence_create(rdev, &fence, radeon_copy_blit_ring_index(rdev));
- if (r)
- return r;
r = radeon_copy_blit(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
- fence);
+ &fence);
break;
default:
DRM_ERROR("Unknown copy method\n");
...
@@ -115,36 +115,20 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return 0;
}
- static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
+ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
- bool sync_to_ring[RADEON_NUM_RINGS] = { };
- bool need_sync = false;
- int i, r;
+ int i;
for (i = 0; i < p->nrelocs; i++) {
- struct radeon_fence *fence;
+ struct radeon_fence *a, *b;
if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
continue;
- fence = p->relocs[i].robj->tbo.sync_obj;
- if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
- sync_to_ring[fence->ring] = true;
- need_sync = true;
- }
- }
- if (!need_sync) {
- return 0;
- }
- r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
- if (r) {
- return r;
- }
- return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
- sync_to_ring, p->ring);
+ a = p->relocs[i].robj->tbo.sync_obj;
+ b = p->ib.sync_to[a->ring];
+ p->ib.sync_to[a->ring] = radeon_fence_later(a, b);
+ }
}
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
...@@ -368,10 +352,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, ...@@ -368,10 +352,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
DRM_ERROR("Invalid command stream !\n"); DRM_ERROR("Invalid command stream !\n");
return r; return r;
} }
r = radeon_cs_sync_rings(parser); radeon_cs_sync_rings(parser);
if (r) {
DRM_ERROR("Failed to synchronize rings !\n");
}
parser->ib.vm_id = 0; parser->ib.vm_id = 0;
r = radeon_ib_schedule(rdev, &parser->ib); r = radeon_ib_schedule(rdev, &parser->ib);
if (r) { if (r) {
...@@ -459,6 +440,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, ...@@ -459,6 +440,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
return r; return r;
} }
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex); mutex_lock(&vm->mutex);
r = radeon_vm_bind(rdev, vm); r = radeon_vm_bind(rdev, vm);
if (r) { if (r) {
...@@ -468,10 +450,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, ...@@ -468,10 +450,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if (r) { if (r) {
goto out; goto out;
} }
r = radeon_cs_sync_rings(parser); radeon_cs_sync_rings(parser);
if (r) {
DRM_ERROR("Failed to synchronize rings !\n");
}
if ((rdev->family >= CHIP_TAHITI) && if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) { (parser->chunk_const_ib_idx != -1)) {
...@@ -499,7 +478,8 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, ...@@ -499,7 +478,8 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
} }
vm->fence = radeon_fence_ref(parser->ib.fence); vm->fence = radeon_fence_ref(parser->ib.fence);
} }
mutex_unlock(&fpriv->vm.mutex); mutex_unlock(&vm->mutex);
mutex_unlock(&rdev->vm_manager.lock);
return r; return r;
} }
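
The lock ordering introduced here is worth spelling out: the global vm_manager lock is always taken before, and released after, the per-VM mutex. A hedged sketch of the resulting pattern (not a literal excerpt):

mutex_lock(&rdev->vm_manager.lock);   /* outer: VM LRU and binding state */
mutex_lock(&vm->mutex);               /* inner: this VM's page tables    */
/* ... radeon_vm_bind(), page table updates, IB submission ... */
mutex_unlock(&vm->mutex);
mutex_unlock(&rdev->vm_manager.lock);
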
...@@ -519,9 +499,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ...@@ -519,9 +499,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct radeon_cs_parser parser; struct radeon_cs_parser parser;
int r; int r;
radeon_mutex_lock(&rdev->cs_mutex);
if (!rdev->accel_working) { if (!rdev->accel_working) {
radeon_mutex_unlock(&rdev->cs_mutex);
return -EBUSY; return -EBUSY;
} }
/* initialize parser */ /* initialize parser */
...@@ -535,7 +513,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ...@@ -535,7 +513,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_ERROR("Failed to initialize parser !\n"); DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r); radeon_cs_parser_fini(&parser, r);
r = radeon_cs_handle_lockup(rdev, r); r = radeon_cs_handle_lockup(rdev, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r; return r;
} }
r = radeon_cs_parser_relocs(&parser); r = radeon_cs_parser_relocs(&parser);
...@@ -544,7 +521,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ...@@ -544,7 +521,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_ERROR("Failed to parse relocation %d!\n", r); DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r); radeon_cs_parser_fini(&parser, r);
r = radeon_cs_handle_lockup(rdev, r); r = radeon_cs_handle_lockup(rdev, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r; return r;
} }
r = radeon_cs_ib_chunk(rdev, &parser); r = radeon_cs_ib_chunk(rdev, &parser);
...@@ -558,7 +534,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ...@@ -558,7 +534,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
out: out:
radeon_cs_parser_fini(&parser, r); radeon_cs_parser_fini(&parser, r);
r = radeon_cs_handle_lockup(rdev, r); r = radeon_cs_handle_lockup(rdev, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r; return r;
} }
......
...@@ -728,20 +728,19 @@ int radeon_device_init(struct radeon_device *rdev, ...@@ -728,20 +728,19 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we /* mutex initialization are all done here so we
* can recall function without having locking issues */ * can recall function without having locking issues */
radeon_mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ring_lock); mutex_init(&rdev->ring_lock);
mutex_init(&rdev->dc_hw_i2c_mutex); mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600) atomic_set(&rdev->ih.lock, 0);
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex); mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex); mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex); init_rwsem(&rdev->pm.mclk_lock);
init_waitqueue_head(&rdev->irq.vblank_queue); init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue); init_waitqueue_head(&rdev->irq.idle_queue);
r = radeon_gem_init(rdev); r = radeon_gem_init(rdev);
if (r) if (r)
return r; return r;
/* initialize vm here */ /* initialize vm here */
mutex_init(&rdev->vm_manager.lock);
rdev->vm_manager.use_bitmap = 1; rdev->vm_manager.use_bitmap = 1;
rdev->vm_manager.max_pfn = 1 << 20; rdev->vm_manager.max_pfn = 1 << 20;
INIT_LIST_HEAD(&rdev->vm_manager.lru_vm); INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
......
...@@ -61,15 +61,21 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring) ...@@ -61,15 +61,21 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
return seq; return seq;
} }
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) int radeon_fence_emit(struct radeon_device *rdev,
struct radeon_fence **fence,
int ring)
{ {
/* we are protected by the ring emission mutex */ /* we are protected by the ring emission mutex */
if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) { *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
return 0; if ((*fence) == NULL) {
return -ENOMEM;
} }
fence->seq = ++rdev->fence_drv[fence->ring].seq; kref_init(&((*fence)->kref));
radeon_fence_ring_emit(rdev, fence->ring, fence); (*fence)->rdev = rdev;
trace_radeon_fence_emit(rdev->ddev, fence->seq); (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
radeon_fence_ring_emit(rdev, ring, *fence);
trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
return 0; return 0;
} }
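
With radeon_fence_create() gone, a caller that needs a fence now allocates and emits it in a single call. Illustrative sketch only, with error handling trimmed; as noted above, the caller is expected to hold the ring emission lock:

struct radeon_fence *fence = NULL;
int r;

/* ring is locked by the caller at this point */
r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r)
	return r;	/* nothing was allocated or emitted on failure */
/* ... wait on or hand off the fence ... */
radeon_fence_unref(&fence);
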
...@@ -138,25 +144,9 @@ static void radeon_fence_destroy(struct kref *kref) ...@@ -138,25 +144,9 @@ static void radeon_fence_destroy(struct kref *kref)
struct radeon_fence *fence; struct radeon_fence *fence;
fence = container_of(kref, struct radeon_fence, kref); fence = container_of(kref, struct radeon_fence, kref);
fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
kfree(fence); kfree(fence);
} }
int radeon_fence_create(struct radeon_device *rdev,
struct radeon_fence **fence,
int ring)
{
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
}
kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
(*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
(*fence)->ring = ring;
return 0;
}
static bool radeon_fence_seq_signaled(struct radeon_device *rdev, static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
u64 seq, unsigned ring) u64 seq, unsigned ring)
{ {
...@@ -176,10 +166,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence) ...@@ -176,10 +166,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
if (!fence) { if (!fence) {
return true; return true;
} }
if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
WARN(1, "Querying an unemitted fence : %p !\n", fence);
return true;
}
if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) { if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
return true; return true;
} }
...@@ -444,10 +430,8 @@ int radeon_fence_wait_any(struct radeon_device *rdev, ...@@ -444,10 +430,8 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
return 0; return 0;
} }
if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
seq[i] = fences[i]->seq; seq[i] = fences[i]->seq;
} }
}
r = radeon_fence_wait_any_seq(rdev, seq, intr); r = radeon_fence_wait_any_seq(rdev, seq, intr);
if (r) { if (r) {
...@@ -465,7 +449,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring) ...@@ -465,7 +449,7 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
* wait. * wait.
*/ */
seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
if (seq >= rdev->fence_drv[ring].seq) { if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
/* nothing to wait for, last_seq is /* nothing to wait for, last_seq is
already the last emitted fence */ already the last emitted fence */
return -ENOENT; return -ENOENT;
...@@ -480,7 +464,7 @@ int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) ...@@ -480,7 +464,7 @@ int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
* activity can be scheduled so there won't be concurrent access * activity can be scheduled so there won't be concurrent access
* to seq value. * to seq value.
*/ */
return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq, return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].sync_seq[ring],
ring, false, false); ring, false, false);
} }
...@@ -508,7 +492,8 @@ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring) ...@@ -508,7 +492,8 @@ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
* but it's ok to report slightly wrong fence count here. * but it's ok to report slightly wrong fence count here.
*/ */
radeon_fence_process(rdev, ring); radeon_fence_process(rdev, ring);
emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq); emitted = rdev->fence_drv[ring].sync_seq[ring]
- atomic64_read(&rdev->fence_drv[ring].last_seq);
/* to avoid 32-bit wrap around */ /* to avoid 32-bit wrap around */
if (emitted > 0x10000000) { if (emitted > 0x10000000) {
emitted = 0x10000000; emitted = 0x10000000;
...@@ -516,6 +501,51 @@ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring) ...@@ -516,6 +501,51 @@ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
return (unsigned)emitted; return (unsigned)emitted;
} }
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
struct radeon_fence_driver *fdrv;
if (!fence) {
return false;
}
if (fence->ring == dst_ring) {
return false;
}
/* we are protected by the ring mutex */
fdrv = &fence->rdev->fence_drv[dst_ring];
if (fence->seq <= fdrv->sync_seq[fence->ring]) {
return false;
}
return true;
}
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
struct radeon_fence_driver *dst, *src;
unsigned i;
if (!fence) {
return;
}
if (fence->ring == dst_ring) {
return;
}
/* we are protected by the ring mutex */
src = &fence->rdev->fence_drv[fence->ring];
dst = &fence->rdev->fence_drv[dst_ring];
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (i == dst_ring) {
continue;
}
dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
}
}
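
Together the two helpers give the intended usage pattern, which the radeon_ib_schedule() hunk later in this patch follows (sketch):

if (radeon_fence_need_sync(fence, dst_ring)) {
	/* make dst_ring wait until fence's ring passes the semaphore */
	radeon_semaphore_sync_rings(rdev, semaphore, fence->ring, dst_ring);
	/* record that dst_ring is now up to date with fence's ring */
	radeon_fence_note_sync(fence, dst_ring);
}
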
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{ {
uint64_t index; uint64_t index;
...@@ -537,7 +567,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) ...@@ -537,7 +567,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
} }
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index; rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring); radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
rdev->fence_drv[ring].initialized = true; rdev->fence_drv[ring].initialized = true;
dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n", dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr); ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
...@@ -546,10 +576,13 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) ...@@ -546,10 +576,13 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring) static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{ {
int i;
rdev->fence_drv[ring].scratch_reg = -1; rdev->fence_drv[ring].scratch_reg = -1;
rdev->fence_drv[ring].cpu_addr = NULL; rdev->fence_drv[ring].cpu_addr = NULL;
rdev->fence_drv[ring].gpu_addr = 0; rdev->fence_drv[ring].gpu_addr = 0;
rdev->fence_drv[ring].seq = 0; for (i = 0; i < RADEON_NUM_RINGS; ++i)
rdev->fence_drv[ring].sync_seq[i] = 0;
atomic64_set(&rdev->fence_drv[ring].last_seq, 0); atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
rdev->fence_drv[ring].last_activity = jiffies; rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].initialized = false; rdev->fence_drv[ring].initialized = false;
...@@ -595,7 +628,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data) ...@@ -595,7 +628,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev; struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private; struct radeon_device *rdev = dev->dev_private;
int i; int i, j;
for (i = 0; i < RADEON_NUM_RINGS; ++i) { for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!rdev->fence_drv[i].initialized) if (!rdev->fence_drv[i].initialized)
...@@ -605,7 +638,13 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data) ...@@ -605,7 +638,13 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
seq_printf(m, "Last signaled fence 0x%016llx\n", seq_printf(m, "Last signaled fence 0x%016llx\n",
(unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq)); (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
seq_printf(m, "Last emitted 0x%016llx\n", seq_printf(m, "Last emitted 0x%016llx\n",
rdev->fence_drv[i].seq); rdev->fence_drv[i].sync_seq[i]);
for (j = 0; j < RADEON_NUM_RINGS; ++j) {
if (i != j && rdev->fence_drv[j].initialized)
seq_printf(m, "Last sync to ring %d 0x%016llx\n",
j, rdev->fence_drv[i].sync_seq[j]);
}
} }
return 0; return 0;
} }
......
...@@ -305,7 +305,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev) ...@@ -305,7 +305,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
return r; return r;
} }
/* cs mutex must be lock */ /* global mutex must be locked */
static void radeon_vm_unbind_locked(struct radeon_device *rdev, static void radeon_vm_unbind_locked(struct radeon_device *rdev,
struct radeon_vm *vm) struct radeon_vm *vm)
{ {
...@@ -356,17 +356,17 @@ int radeon_vm_manager_suspend(struct radeon_device *rdev) ...@@ -356,17 +356,17 @@ int radeon_vm_manager_suspend(struct radeon_device *rdev)
{ {
struct radeon_vm *vm, *tmp; struct radeon_vm *vm, *tmp;
radeon_mutex_lock(&rdev->cs_mutex); mutex_lock(&rdev->vm_manager.lock);
/* unbind all active vm */ /* unbind all active vm */
list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) { list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
radeon_vm_unbind_locked(rdev, vm); radeon_vm_unbind_locked(rdev, vm);
} }
rdev->vm_manager.funcs->fini(rdev); rdev->vm_manager.funcs->fini(rdev);
radeon_mutex_unlock(&rdev->cs_mutex); mutex_unlock(&rdev->vm_manager.lock);
return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager); return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
} }
/* cs mutex must be lock */ /* global mutex must be locked */
void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm) void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
{ {
mutex_lock(&vm->mutex); mutex_lock(&vm->mutex);
...@@ -374,7 +374,7 @@ void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm) ...@@ -374,7 +374,7 @@ void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
mutex_unlock(&vm->mutex); mutex_unlock(&vm->mutex);
} }
/* cs mutex must be lock & vm mutex must be lock */ /* global and local mutex must be locked */
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm) int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
{ {
struct radeon_vm *vm_evict; struct radeon_vm *vm_evict;
...@@ -478,7 +478,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev, ...@@ -478,7 +478,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
if (last_pfn > vm->last_pfn) { if (last_pfn > vm->last_pfn) {
/* release mutex and lock in right order */ /* release mutex and lock in right order */
mutex_unlock(&vm->mutex); mutex_unlock(&vm->mutex);
radeon_mutex_lock(&rdev->cs_mutex); mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex); mutex_lock(&vm->mutex);
/* and check again */ /* and check again */
if (last_pfn > vm->last_pfn) { if (last_pfn > vm->last_pfn) {
...@@ -487,7 +487,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev, ...@@ -487,7 +487,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
radeon_vm_unbind_locked(rdev, vm); radeon_vm_unbind_locked(rdev, vm);
vm->last_pfn = (last_pfn + align) & ~align; vm->last_pfn = (last_pfn + align) & ~align;
} }
radeon_mutex_unlock(&rdev->cs_mutex); mutex_unlock(&rdev->vm_manager.lock);
} }
head = &vm->va; head = &vm->va;
last_offset = 0; last_offset = 0;
...@@ -542,7 +542,7 @@ static u64 radeon_vm_get_addr(struct radeon_device *rdev, ...@@ -542,7 +542,7 @@ static u64 radeon_vm_get_addr(struct radeon_device *rdev,
return addr; return addr;
} }
/* object have to be reserved & cs mutex took & vm mutex took */ /* object has to be reserved & global and local mutex must be locked */
int radeon_vm_bo_update_pte(struct radeon_device *rdev, int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm, struct radeon_vm *vm,
struct radeon_bo *bo, struct radeon_bo *bo,
...@@ -601,10 +601,10 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, ...@@ -601,10 +601,10 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
if (bo_va == NULL) if (bo_va == NULL)
return 0; return 0;
radeon_mutex_lock(&rdev->cs_mutex); mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex); mutex_lock(&vm->mutex);
radeon_vm_bo_update_pte(rdev, vm, bo, NULL); radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
radeon_mutex_unlock(&rdev->cs_mutex); mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list); list_del(&bo_va->vm_list);
mutex_unlock(&vm->mutex); mutex_unlock(&vm->mutex);
list_del(&bo_va->bo_list); list_del(&bo_va->bo_list);
...@@ -647,10 +647,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) ...@@ -647,10 +647,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp; struct radeon_bo_va *bo_va, *tmp;
int r; int r;
radeon_mutex_lock(&rdev->cs_mutex); mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex); mutex_lock(&vm->mutex);
radeon_vm_unbind_locked(rdev, vm); radeon_vm_unbind_locked(rdev, vm);
radeon_mutex_unlock(&rdev->cs_mutex); mutex_unlock(&rdev->vm_manager.lock);
/* remove all bo */ /* remove all bo */
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
......
...@@ -159,11 +159,9 @@ void radeon_gem_object_close(struct drm_gem_object *obj, ...@@ -159,11 +159,9 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r) static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{ {
if (r == -EDEADLK) { if (r == -EDEADLK) {
radeon_mutex_lock(&rdev->cs_mutex);
r = radeon_gpu_reset(rdev); r = radeon_gpu_reset(rdev);
if (!r) if (!r)
r = -EAGAIN; r = -EAGAIN;
radeon_mutex_unlock(&rdev->cs_mutex);
} }
return r; return r;
} }
......
...@@ -32,6 +32,8 @@ ...@@ -32,6 +32,8 @@
#include "radeon.h" #include "radeon.h"
#include "atom.h" #include "atom.h"
#define RADEON_WAIT_IDLE_TIMEOUT 200
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS) irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{ {
struct drm_device *dev = (struct drm_device *) arg; struct drm_device *dev = (struct drm_device *) arg;
...@@ -62,56 +64,56 @@ static void radeon_hotplug_work_func(struct work_struct *work) ...@@ -62,56 +64,56 @@ static void radeon_hotplug_work_func(struct work_struct *work)
void radeon_driver_irq_preinstall_kms(struct drm_device *dev) void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{ {
struct radeon_device *rdev = dev->dev_private; struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
unsigned i; unsigned i;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */ /* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++) for (i = 0; i < RADEON_NUM_RINGS; i++)
rdev->irq.sw_int[i] = false; atomic_set(&rdev->irq.ring_int[i], 0);
rdev->irq.gui_idle = false; rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++) for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false; rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) { for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false; rdev->irq.crtc_vblank_int[i] = false;
rdev->irq.pflip[i] = false; atomic_set(&rdev->irq.pflip[i], 0);
rdev->irq.afmt[i] = false; rdev->irq.afmt[i] = false;
} }
radeon_irq_set(rdev); radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/* Clear bits */ /* Clear bits */
radeon_irq_process(rdev); radeon_irq_process(rdev);
} }
int radeon_driver_irq_postinstall_kms(struct drm_device *dev) int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{ {
struct radeon_device *rdev = dev->dev_private;
unsigned i;
dev->max_vblank_count = 0x001fffff; dev->max_vblank_count = 0x001fffff;
for (i = 0; i < RADEON_NUM_RINGS; i++)
rdev->irq.sw_int[i] = true;
radeon_irq_set(rdev);
return 0; return 0;
} }
void radeon_driver_irq_uninstall_kms(struct drm_device *dev) void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{ {
struct radeon_device *rdev = dev->dev_private; struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
unsigned i; unsigned i;
if (rdev == NULL) { if (rdev == NULL) {
return; return;
} }
spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */ /* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++) for (i = 0; i < RADEON_NUM_RINGS; i++)
rdev->irq.sw_int[i] = false; atomic_set(&rdev->irq.ring_int[i], 0);
rdev->irq.gui_idle = false; rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++) for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false; rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) { for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false; rdev->irq.crtc_vblank_int[i] = false;
rdev->irq.pflip[i] = false; atomic_set(&rdev->irq.pflip[i], 0);
rdev->irq.afmt[i] = false; rdev->irq.afmt[i] = false;
} }
radeon_irq_set(rdev); radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
} }
static bool radeon_msi_ok(struct radeon_device *rdev) static bool radeon_msi_ok(struct radeon_device *rdev)
...@@ -168,15 +170,12 @@ static bool radeon_msi_ok(struct radeon_device *rdev) ...@@ -168,15 +170,12 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
int radeon_irq_kms_init(struct radeon_device *rdev) int radeon_irq_kms_init(struct radeon_device *rdev)
{ {
int i;
int r = 0; int r = 0;
INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
spin_lock_init(&rdev->irq.sw_lock); spin_lock_init(&rdev->irq.lock);
for (i = 0; i < rdev->num_crtc; i++)
spin_lock_init(&rdev->irq.pflip_lock[i]);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc); r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) { if (r) {
return r; return r;
...@@ -217,25 +216,28 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring) ...@@ -217,25 +216,28 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
{ {
unsigned long irqflags; unsigned long irqflags;
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags); if (!rdev->ddev->irq_enabled)
if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) { return;
rdev->irq.sw_int[ring] = true;
if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev); radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
} }
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
} }
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring) void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{ {
unsigned long irqflags; unsigned long irqflags;
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags); if (!rdev->ddev->irq_enabled)
BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0); return;
if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
rdev->irq.sw_int[ring] = false; if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev); radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
} }
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
} }
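
Replacing the sw_refcount/sw_int pair with one atomic counter means only the 0 -> 1 and 1 -> 0 transitions reprogram the hardware. A stand-alone model of that rule, using C11 atomics as a stand-in for the kernel's atomic_t (the helper names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int ring_int;	/* models rdev->irq.ring_int[ring] */

/* returns true when the interrupt source must be enabled (0 -> 1) */
static bool irq_get(void)
{
	return atomic_fetch_add(&ring_int, 1) == 0;
}

/* returns true when the interrupt source must be disabled (1 -> 0) */
static bool irq_put(void)
{
	return atomic_fetch_sub(&ring_int, 1) == 1;
}
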
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
...@@ -245,12 +247,14 @@ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) ...@@ -245,12 +247,14 @@ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
if (crtc < 0 || crtc >= rdev->num_crtc) if (crtc < 0 || crtc >= rdev->num_crtc)
return; return;
spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags); if (!rdev->ddev->irq_enabled)
if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) { return;
rdev->irq.pflip[crtc] = true;
if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev); radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
} }
spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
} }
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
...@@ -260,12 +264,77 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) ...@@ -260,12 +264,77 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
if (crtc < 0 || crtc >= rdev->num_crtc) if (crtc < 0 || crtc >= rdev->num_crtc)
return; return;
spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags); if (!rdev->ddev->irq_enabled)
BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0); return;
if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
rdev->irq.pflip[crtc] = false; if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev); radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
} }
spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
} }
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
{
unsigned long irqflags;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.afmt[block] = true;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
{
unsigned long irqflags;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.afmt[block] = false;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
unsigned long irqflags;
int i;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
unsigned long irqflags;
int i;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
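
Callers now hand over a bitmask of hot-plug pins instead of writing rdev->irq.hpd[] directly; the rs600 hpd_init/fini hunks later in this patch build the mask exactly this way (sketch):

unsigned enable = 0;

enable |= 1 << RADEON_HPD_1;	/* one bit per connector's hpd pin */
enable |= 1 << RADEON_HPD_2;
radeon_irq_kms_enable_hpd(rdev, enable);
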
int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
{
unsigned long irqflags;
int r;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.gui_idle = true;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.gui_idle = false;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
return r;
}
...@@ -382,29 +382,35 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) ...@@ -382,29 +382,35 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{ {
struct radeon_device *rdev = dev->dev_private; struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
int r;
if (crtc < 0 || crtc >= rdev->num_crtc) { if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc); DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL; return -EINVAL;
} }
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.crtc_vblank_int[crtc] = true; rdev->irq.crtc_vblank_int[crtc] = true;
r = radeon_irq_set(rdev);
return radeon_irq_set(rdev); spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
return r;
} }
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{ {
struct radeon_device *rdev = dev->dev_private; struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
if (crtc < 0 || crtc >= rdev->num_crtc) { if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc); DRM_ERROR("Invalid crtc %d\n", crtc);
return; return;
} }
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.crtc_vblank_int[crtc] = false; rdev->irq.crtc_vblank_int[crtc] = false;
radeon_irq_set(rdev); radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
} }
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
......
...@@ -154,11 +154,11 @@ int radeon_bo_create(struct radeon_device *rdev, ...@@ -154,11 +154,11 @@ int radeon_bo_create(struct radeon_device *rdev,
INIT_LIST_HEAD(&bo->va); INIT_LIST_HEAD(&bo->va);
radeon_ttm_placement_from_domain(bo, domain); radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */ /* Kernel allocation are uninterruptible */
mutex_lock(&rdev->vram_mutex); down_read(&rdev->pm.mclk_lock);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, 0, !kernel, NULL, &bo->placement, page_align, 0, !kernel, NULL,
acc_size, sg, &radeon_ttm_bo_destroy); acc_size, sg, &radeon_ttm_bo_destroy);
mutex_unlock(&rdev->vram_mutex); up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) { if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) { if (r != -ERESTARTSYS) {
if (domain == RADEON_GEM_DOMAIN_VRAM) { if (domain == RADEON_GEM_DOMAIN_VRAM) {
...@@ -219,9 +219,9 @@ void radeon_bo_unref(struct radeon_bo **bo) ...@@ -219,9 +219,9 @@ void radeon_bo_unref(struct radeon_bo **bo)
return; return;
rdev = (*bo)->rdev; rdev = (*bo)->rdev;
tbo = &((*bo)->tbo); tbo = &((*bo)->tbo);
mutex_lock(&rdev->vram_mutex); down_read(&rdev->pm.mclk_lock);
ttm_bo_unref(&tbo); ttm_bo_unref(&tbo);
mutex_unlock(&rdev->vram_mutex); up_read(&rdev->pm.mclk_lock);
if (tbo == NULL) if (tbo == NULL)
*bo = NULL; *bo = NULL;
} }
......
...@@ -34,7 +34,6 @@ ...@@ -34,7 +34,6 @@
#define RADEON_IDLE_LOOP_MS 100 #define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200 #define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200 #define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200
static const char *radeon_pm_state_type_name[5] = { static const char *radeon_pm_state_type_name[5] = {
"Default", "Default",
...@@ -251,21 +250,14 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) ...@@ -251,21 +250,14 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
return; return;
mutex_lock(&rdev->ddev->struct_mutex); mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex); down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock); mutex_lock(&rdev->ring_lock);
/* gui idle int has issues on older chips it seems */ /* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) { if (rdev->family >= CHIP_R600) {
if (rdev->irq.installed) { if (rdev->irq.installed) {
/* wait for GPU idle */ /* wait for GPU to become idle */
rdev->pm.gui_idle = false; radeon_irq_kms_wait_gui_idle(rdev);
rdev->irq.gui_idle = true;
radeon_irq_set(rdev);
wait_event_interruptible_timeout(
rdev->irq.idle_queue, rdev->pm.gui_idle,
msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
rdev->irq.gui_idle = false;
radeon_irq_set(rdev);
} }
} else { } else {
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
...@@ -303,7 +295,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) ...@@ -303,7 +295,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
mutex_unlock(&rdev->ring_lock); mutex_unlock(&rdev->ring_lock);
mutex_unlock(&rdev->vram_mutex); up_write(&rdev->pm.mclk_lock);
mutex_unlock(&rdev->ddev->struct_mutex); mutex_unlock(&rdev->ddev->struct_mutex);
} }
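
The switch from vram_mutex to an rw-semaphore changes the concurrency model: buffer creation/destruction and CPU faults only need to exclude reclocking, not each other. Sketch of who takes which side (pseudo-kernel code, not a literal excerpt):

/* readers: radeon_bo_create(), radeon_bo_unref(), radeon_ttm_fault() */
down_read(&rdev->pm.mclk_lock);
/* ... create, destroy or fault in a VRAM-backed object ... */
up_read(&rdev->pm.mclk_lock);

/* writer: radeon_pm_set_clocks() while changing the memory clock */
down_write(&rdev->pm.mclk_lock);
/* ... reclock with no reader in flight ... */
up_write(&rdev->pm.mclk_lock);
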
......
...@@ -42,40 +42,43 @@ int radeon_debugfs_sa_init(struct radeon_device *rdev); ...@@ -42,40 +42,43 @@ int radeon_debugfs_sa_init(struct radeon_device *rdev);
int radeon_ib_get(struct radeon_device *rdev, int ring, int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, unsigned size) struct radeon_ib *ib, unsigned size)
{ {
int r; int i, r;
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true); r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) { if (r) {
dev_err(rdev->dev, "failed to get a new IB (%d)\n", r); dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r; return r;
} }
r = radeon_fence_create(rdev, &ib->fence, ring);
r = radeon_semaphore_create(rdev, &ib->semaphore);
if (r) { if (r) {
dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
return r; return r;
} }
ib->ring = ring;
ib->fence = NULL;
ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo); ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
ib->vm_id = 0; ib->vm_id = 0;
ib->is_const_ib = false; ib->is_const_ib = false;
ib->semaphore = NULL; for (i = 0; i < RADEON_NUM_RINGS; ++i)
ib->sync_to[i] = NULL;
return 0; return 0;
} }
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{ {
radeon_semaphore_free(rdev, ib->semaphore, ib->fence); radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence); radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence); radeon_fence_unref(&ib->fence);
} }
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{ {
struct radeon_ring *ring = &rdev->ring[ib->fence->ring]; struct radeon_ring *ring = &rdev->ring[ib->ring];
int r = 0; bool need_sync = false;
int i, r = 0;
if (!ib->length_dw || !ring->ready) { if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */ /* TODO: Nothings in the ib we should report. */
...@@ -84,13 +87,31 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) ...@@ -84,13 +87,31 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
} }
/* 64 dwords should be enough for fence too */ /* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, ring, 64); r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
if (r) { if (r) {
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r; return r;
} }
radeon_ring_ib_execute(rdev, ib->fence->ring, ib); for (i = 0; i < RADEON_NUM_RINGS; ++i) {
radeon_fence_emit(rdev, ib->fence); struct radeon_fence *fence = ib->sync_to[i];
if (radeon_fence_need_sync(fence, ib->ring)) {
need_sync = true;
radeon_semaphore_sync_rings(rdev, ib->semaphore,
fence->ring, ib->ring);
radeon_fence_note_sync(fence, ib->ring);
}
}
/* immediately free semaphore when we don't need to sync */
if (!need_sync) {
radeon_semaphore_free(rdev, &ib->semaphore, NULL);
}
radeon_ring_ib_execute(rdev, ib->ring, ib);
r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
if (r) {
dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
radeon_ring_unlock_undo(rdev, ring);
return r;
}
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring);
return 0; return 0;
} }
......
...@@ -349,7 +349,7 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo, ...@@ -349,7 +349,7 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
sa_manager = (*sa_bo)->manager; sa_manager = (*sa_bo)->manager;
spin_lock(&sa_manager->lock); spin_lock(&sa_manager->lock);
if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) { if (fence && !radeon_fence_signaled(fence)) {
(*sa_bo)->fence = radeon_fence_ref(fence); (*sa_bo)->fence = radeon_fence_ref(fence);
list_add_tail(&(*sa_bo)->flist, list_add_tail(&(*sa_bo)->flist,
&sa_manager->flist[fence->ring]); &sa_manager->flist[fence->ring]);
......
...@@ -68,70 +68,49 @@ void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, ...@@ -68,70 +68,49 @@ void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true); radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
} }
/* caller must hold ring lock */
int radeon_semaphore_sync_rings(struct radeon_device *rdev, int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore, struct radeon_semaphore *semaphore,
bool sync_to[RADEON_NUM_RINGS], int signaler, int waiter)
int dst_ring)
{ {
int i = 0, r; int r;
mutex_lock(&rdev->ring_lock); /* no need to signal and wait on the same ring */
r = radeon_ring_alloc(rdev, &rdev->ring[dst_ring], RADEON_NUM_RINGS * 8); if (signaler == waiter) {
if (r) { return 0;
goto error;
} }
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
/* no need to sync to our own or unused rings */
if (!sync_to[i] || i == dst_ring)
continue;
/* prevent GPU deadlocks */ /* prevent GPU deadlocks */
if (!rdev->ring[i].ready) { if (!rdev->ring[signaler].ready) {
dev_err(rdev->dev, "Trying to sync to a disabled ring!"); dev_err(rdev->dev, "Trying to sync to a disabled ring!");
r = -EINVAL; return -EINVAL;
goto error;
} }
r = radeon_ring_alloc(rdev, &rdev->ring[i], 8); r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
if (r) { if (r) {
goto error; return r;
}
radeon_semaphore_emit_signal(rdev, i, semaphore);
radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
radeon_ring_commit(rdev, &rdev->ring[i]);
} }
radeon_semaphore_emit_signal(rdev, signaler, semaphore);
radeon_ring_commit(rdev, &rdev->ring[signaler]);
radeon_ring_commit(rdev, &rdev->ring[dst_ring]); /* we assume caller has already allocated space on waiters ring */
mutex_unlock(&rdev->ring_lock); radeon_semaphore_emit_wait(rdev, waiter, semaphore);
return 0; return 0;
error:
/* unlock all locks taken so far */
for (--i; i >= 0; --i) {
if (sync_to[i] || i == dst_ring) {
radeon_ring_undo(&rdev->ring[i]);
}
}
radeon_ring_undo(&rdev->ring[dst_ring]);
mutex_unlock(&rdev->ring_lock);
return r;
} }
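
Note the shifted responsibility: the helper now locks and commits only the signaler ring, so space for the wait on the waiter ring must already be reserved by the caller, as radeon_ib_schedule() does with its enlarged radeon_ring_lock(). Caller-side sketch:

/* reserve waiter-ring space up front (8 dwords per potential wait) */
r = radeon_ring_lock(rdev, &rdev->ring[waiter], 64 + RADEON_NUM_RINGS * 8);
if (r)
	return r;
r = radeon_semaphore_sync_rings(rdev, semaphore, signaler, waiter);
/* ... emit the dependent commands on the waiter ring ... */
radeon_ring_unlock_commit(rdev, &rdev->ring[waiter]);
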
void radeon_semaphore_free(struct radeon_device *rdev, void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore *semaphore, struct radeon_semaphore **semaphore,
struct radeon_fence *fence) struct radeon_fence *fence)
{ {
if (semaphore == NULL) { if (semaphore == NULL || *semaphore == NULL) {
return; return;
} }
if (semaphore->waiters > 0) { if ((*semaphore)->waiters > 0) {
dev_err(rdev->dev, "semaphore %p has more waiters than signalers," dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
" hardware lockup imminent!\n", semaphore); " hardware lockup imminent!\n", *semaphore);
} }
radeon_sa_bo_free(rdev, &semaphore->sa_bo, fence); radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
kfree(semaphore); kfree(*semaphore);
*semaphore = NULL;
} }
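
Taking a double pointer lets the helper clear the caller's handle, so a repeated free degrades to a no-op instead of a double kfree (sketch):

struct radeon_semaphore *sem = NULL;
/* ... radeon_semaphore_create(rdev, &sem), emit signal/wait ... */
radeon_semaphore_free(rdev, &sem, fence);	/* frees and sets sem = NULL */
radeon_semaphore_free(rdev, &sem, NULL);	/* harmless: *semaphore is NULL */
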
...@@ -106,13 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev) ...@@ -106,13 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]); radeon_bo_kunmap(gtt_obj[i]);
r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX); r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
goto out_cleanup;
}
r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) { if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i); DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
goto out_cleanup; goto out_cleanup;
...@@ -155,13 +149,7 @@ void radeon_test_moves(struct radeon_device *rdev) ...@@ -155,13 +149,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(vram_obj); radeon_bo_kunmap(vram_obj);
r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX); r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
goto out_cleanup;
}
r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) { if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i); DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
goto out_cleanup; goto out_cleanup;
...@@ -245,17 +233,6 @@ void radeon_test_ring_sync(struct radeon_device *rdev, ...@@ -245,17 +233,6 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
int ridxB = radeon_ring_index(rdev, ringB); int ridxB = radeon_ring_index(rdev, ringB);
int r; int r;
r = radeon_fence_create(rdev, &fence1, ridxA);
if (r) {
DRM_ERROR("Failed to create sync fence 1\n");
goto out_cleanup;
}
r = radeon_fence_create(rdev, &fence2, ridxA);
if (r) {
DRM_ERROR("Failed to create sync fence 2\n");
goto out_cleanup;
}
r = radeon_semaphore_create(rdev, &semaphore); r = radeon_semaphore_create(rdev, &semaphore);
if (r) { if (r) {
DRM_ERROR("Failed to create semaphore\n"); DRM_ERROR("Failed to create semaphore\n");
...@@ -268,9 +245,19 @@ void radeon_test_ring_sync(struct radeon_device *rdev, ...@@ -268,9 +245,19 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup; goto out_cleanup;
} }
radeon_semaphore_emit_wait(rdev, ridxA, semaphore); radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
radeon_fence_emit(rdev, fence1); r = radeon_fence_emit(rdev, &fence1, ridxA);
if (r) {
DRM_ERROR("Failed to emit fence 1\n");
radeon_ring_unlock_undo(rdev, ringA);
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ridxA, semaphore); radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
radeon_fence_emit(rdev, fence2); r = radeon_fence_emit(rdev, &fence2, ridxA);
if (r) {
DRM_ERROR("Failed to emit fence 2\n");
radeon_ring_unlock_undo(rdev, ringA);
goto out_cleanup;
}
radeon_ring_unlock_commit(rdev, ringA); radeon_ring_unlock_commit(rdev, ringA);
mdelay(1000); mdelay(1000);
...@@ -316,8 +303,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, ...@@ -316,8 +303,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
} }
out_cleanup: out_cleanup:
if (semaphore) radeon_semaphore_free(rdev, &semaphore, NULL);
radeon_semaphore_free(rdev, semaphore, NULL);
if (fence1) if (fence1)
radeon_fence_unref(&fence1); radeon_fence_unref(&fence1);
...@@ -342,17 +328,6 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, ...@@ -342,17 +328,6 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
bool sigA, sigB; bool sigA, sigB;
int i, r; int i, r;
r = radeon_fence_create(rdev, &fenceA, ridxA);
if (r) {
DRM_ERROR("Failed to create sync fence 1\n");
goto out_cleanup;
}
r = radeon_fence_create(rdev, &fenceB, ridxB);
if (r) {
DRM_ERROR("Failed to create sync fence 2\n");
goto out_cleanup;
}
r = radeon_semaphore_create(rdev, &semaphore); r = radeon_semaphore_create(rdev, &semaphore);
if (r) { if (r) {
DRM_ERROR("Failed to create semaphore\n"); DRM_ERROR("Failed to create semaphore\n");
...@@ -365,7 +340,12 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, ...@@ -365,7 +340,12 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup; goto out_cleanup;
} }
radeon_semaphore_emit_wait(rdev, ridxA, semaphore); radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
radeon_fence_emit(rdev, fenceA); r = radeon_fence_emit(rdev, &fenceA, ridxA);
if (r) {
DRM_ERROR("Failed to emit sync fence 1\n");
radeon_ring_unlock_undo(rdev, ringA);
goto out_cleanup;
}
radeon_ring_unlock_commit(rdev, ringA); radeon_ring_unlock_commit(rdev, ringA);
r = radeon_ring_lock(rdev, ringB, 64); r = radeon_ring_lock(rdev, ringB, 64);
...@@ -374,7 +354,12 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, ...@@ -374,7 +354,12 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup; goto out_cleanup;
} }
radeon_semaphore_emit_wait(rdev, ridxB, semaphore); radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
radeon_fence_emit(rdev, fenceB); r = radeon_fence_emit(rdev, &fenceB, ridxB);
if (r) {
DRM_ERROR("Failed to create sync fence 2\n");
radeon_ring_unlock_undo(rdev, ringB);
goto out_cleanup;
}
radeon_ring_unlock_commit(rdev, ringB); radeon_ring_unlock_commit(rdev, ringB);
mdelay(1000); mdelay(1000);
...@@ -436,8 +421,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev, ...@@ -436,8 +421,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
} }
out_cleanup: out_cleanup:
if (semaphore) radeon_semaphore_free(rdev, &semaphore, NULL);
radeon_semaphore_free(rdev, semaphore, NULL);
if (fenceA) if (fenceA)
radeon_fence_unref(&fenceA); radeon_fence_unref(&fenceA);
......
...@@ -222,15 +222,11 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, ...@@ -222,15 +222,11 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
{ {
struct radeon_device *rdev; struct radeon_device *rdev;
uint64_t old_start, new_start; uint64_t old_start, new_start;
struct radeon_fence *fence, *old_fence; struct radeon_fence *fence;
struct radeon_semaphore *sem = NULL; int r, ridx;
int r;
rdev = radeon_get_rdev(bo->bdev); rdev = radeon_get_rdev(bo->bdev);
r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev)); ridx = radeon_copy_ring_index(rdev);
if (unlikely(r)) {
return r;
}
old_start = old_mem->start << PAGE_SHIFT; old_start = old_mem->start << PAGE_SHIFT;
new_start = new_mem->start << PAGE_SHIFT; new_start = new_mem->start << PAGE_SHIFT;
...@@ -243,7 +239,6 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, ...@@ -243,7 +239,6 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
break; break;
default: default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
radeon_fence_unref(&fence);
return -EINVAL; return -EINVAL;
} }
switch (new_mem->mem_type) { switch (new_mem->mem_type) {
...@@ -255,46 +250,23 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, ...@@ -255,46 +250,23 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
break; break;
default: default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
radeon_fence_unref(&fence);
return -EINVAL; return -EINVAL;
} }
if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) { if (!rdev->ring[ridx].ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n"); DRM_ERROR("Trying to move memory with ring turned off.\n");
radeon_fence_unref(&fence);
return -EINVAL; return -EINVAL;
} }
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
/* sync other rings */ /* sync other rings */
old_fence = bo->sync_obj; fence = bo->sync_obj;
if (old_fence && old_fence->ring != fence->ring
&& !radeon_fence_signaled(old_fence)) {
bool sync_to_ring[RADEON_NUM_RINGS] = { };
sync_to_ring[old_fence->ring] = true;
r = radeon_semaphore_create(rdev, &sem);
if (r) {
radeon_fence_unref(&fence);
return r;
}
r = radeon_semaphore_sync_rings(rdev, sem,
sync_to_ring, fence->ring);
if (r) {
radeon_semaphore_free(rdev, sem, NULL);
radeon_fence_unref(&fence);
return r;
}
}
r = radeon_copy(rdev, old_start, new_start, r = radeon_copy(rdev, old_start, new_start,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
fence); &fence);
/* FIXME: handle copy error */ /* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
evict, no_wait_reserve, no_wait_gpu, new_mem); evict, no_wait_reserve, no_wait_gpu, new_mem);
radeon_semaphore_free(rdev, sem, fence);
radeon_fence_unref(&fence); radeon_fence_unref(&fence);
return r; return r;
} }
...@@ -825,9 +797,9 @@ static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -825,9 +797,9 @@ static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE; return VM_FAULT_NOPAGE;
} }
rdev = radeon_get_rdev(bo->bdev); rdev = radeon_get_rdev(bo->bdev);
mutex_lock(&rdev->vram_mutex); down_read(&rdev->pm.mclk_lock);
r = ttm_vm_ops->fault(vma, vmf); r = ttm_vm_ops->fault(vma, vmf);
mutex_unlock(&rdev->vram_mutex); up_read(&rdev->pm.mclk_lock);
return r; return r;
} }
......
...@@ -294,6 +294,7 @@ void rs600_hpd_init(struct radeon_device *rdev) ...@@ -294,6 +294,7 @@ void rs600_hpd_init(struct radeon_device *rdev)
{ {
struct drm_device *dev = rdev->ddev; struct drm_device *dev = rdev->ddev;
struct drm_connector *connector; struct drm_connector *connector;
unsigned enable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector *radeon_connector = to_radeon_connector(connector);
...@@ -301,26 +302,25 @@ void rs600_hpd_init(struct radeon_device *rdev) ...@@ -301,26 +302,25 @@ void rs600_hpd_init(struct radeon_device *rdev)
case RADEON_HPD_1: case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(1)); S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
rdev->irq.hpd[0] = true;
break; break;
case RADEON_HPD_2: case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(1)); S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
rdev->irq.hpd[1] = true;
break; break;
default: default:
break; break;
} }
enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
} }
if (rdev->irq.installed) radeon_irq_kms_enable_hpd(rdev, enable);
rs600_irq_set(rdev);
} }
void rs600_hpd_fini(struct radeon_device *rdev) void rs600_hpd_fini(struct radeon_device *rdev)
{ {
struct drm_device *dev = rdev->ddev; struct drm_device *dev = rdev->ddev;
struct drm_connector *connector; struct drm_connector *connector;
unsigned disable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector *radeon_connector = to_radeon_connector(connector);
...@@ -328,17 +328,17 @@ void rs600_hpd_fini(struct radeon_device *rdev) ...@@ -328,17 +328,17 @@ void rs600_hpd_fini(struct radeon_device *rdev)
case RADEON_HPD_1: case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(0)); S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
rdev->irq.hpd[0] = false;
break; break;
case RADEON_HPD_2: case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(0)); S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
rdev->irq.hpd[1] = false;
break; break;
default: default:
break; break;
} }
disable |= 1 << radeon_connector->hpd.hpd;
} }
radeon_irq_kms_disable_hpd(rdev, disable);
} }
int rs600_asic_reset(struct radeon_device *rdev) int rs600_asic_reset(struct radeon_device *rdev)
...@@ -564,18 +564,18 @@ int rs600_irq_set(struct radeon_device *rdev) ...@@ -564,18 +564,18 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0); WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL; return -EINVAL;
} }
if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) { if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
tmp |= S_000040_SW_INT_EN(1); tmp |= S_000040_SW_INT_EN(1);
} }
if (rdev->irq.gui_idle) { if (rdev->irq.gui_idle) {
tmp |= S_000040_GUI_IDLE(1); tmp |= S_000040_GUI_IDLE(1);
} }
if (rdev->irq.crtc_vblank_int[0] || if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) { atomic_read(&rdev->irq.pflip[0])) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
} }
if (rdev->irq.crtc_vblank_int[1] || if (rdev->irq.crtc_vblank_int[1] ||
rdev->irq.pflip[1]) { atomic_read(&rdev->irq.pflip[1])) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
} }
if (rdev->irq.hpd[0]) { if (rdev->irq.hpd[0]) {
...@@ -686,7 +686,6 @@ int rs600_irq_process(struct radeon_device *rdev) ...@@ -686,7 +686,6 @@ int rs600_irq_process(struct radeon_device *rdev)
/* GUI idle */ /* GUI idle */
if (G_000040_GUI_IDLE(status)) { if (G_000040_GUI_IDLE(status)) {
rdev->irq.gui_idle_acked = true; rdev->irq.gui_idle_acked = true;
rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue); wake_up(&rdev->irq.idle_queue);
} }
/* Vertical blank interrupts */ /* Vertical blank interrupts */
...@@ -696,7 +695,7 @@ int rs600_irq_process(struct radeon_device *rdev) ...@@ -696,7 +695,7 @@ int rs600_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true; rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue); wake_up(&rdev->irq.vblank_queue);
} }
if (rdev->irq.pflip[0]) if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0); radeon_crtc_handle_flip(rdev, 0);
} }
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
...@@ -705,7 +704,7 @@ int rs600_irq_process(struct radeon_device *rdev) ...@@ -705,7 +704,7 @@ int rs600_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true; rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue); wake_up(&rdev->irq.vblank_queue);
} }
if (rdev->irq.pflip[1]) if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1); radeon_crtc_handle_flip(rdev, 1);
} }
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
......
...@@ -1762,7 +1762,7 @@ void si_fence_ring_emit(struct radeon_device *rdev, ...@@ -1762,7 +1762,7 @@ void si_fence_ring_emit(struct radeon_device *rdev,
*/ */
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{ {
struct radeon_ring *ring = &rdev->ring[ib->fence->ring]; struct radeon_ring *ring = &rdev->ring[ib->ring];
u32 header; u32 header;
if (ib->is_const_ib) if (ib->is_const_ib)
...@@ -2702,7 +2702,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) ...@@ -2702,7 +2702,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
if (ib->is_const_ib) if (ib->is_const_ib)
ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt); ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
else { else {
switch (ib->fence->ring) { switch (ib->ring) {
case RADEON_RING_TYPE_GFX_INDEX: case RADEON_RING_TYPE_GFX_INDEX:
ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt); ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
break; break;
...@@ -2711,7 +2711,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) ...@@ -2711,7 +2711,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt); ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
break; break;
default: default:
dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->fence->ring); dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
ret = -EINVAL; ret = -EINVAL;
break; break;
} }
...@@ -2942,7 +2942,6 @@ static void si_disable_interrupts(struct radeon_device *rdev) ...@@ -2942,7 +2942,6 @@ static void si_disable_interrupts(struct radeon_device *rdev)
WREG32(IH_RB_RPTR, 0); WREG32(IH_RB_RPTR, 0);
WREG32(IH_RB_WPTR, 0); WREG32(IH_RB_WPTR, 0);
rdev->ih.enabled = false; rdev->ih.enabled = false;
rdev->ih.wptr = 0;
rdev->ih.rptr = 0; rdev->ih.rptr = 0;
} }
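The cached rdev->ih.wptr disappears here because the write pointer is now always re-read from the hardware inside the handler; disabling the ring only has to reset the software read pointer. A minimal sketch of the slimmed-down state (a hypothetical struct, not the real radeon_ih):

#include <stdbool.h>
#include <stdint.h>

/* The handler keeps wptr purely as a local, fetched from the hardware on
 * every pass, so the per-device state only tracks what software owns. */
struct ih_ring {
    bool     enabled;
    uint32_t rptr;        /* software read pointer */
    /* no cached wptr any more */
};

void ih_disable(struct ih_ring *ih)
{
    ih->enabled = false;
    ih->rptr = 0;         /* mirrors si_disable_interrupts() after the patch */
}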
...@@ -3093,45 +3092,45 @@ int si_irq_set(struct radeon_device *rdev) ...@@ -3093,45 +3092,45 @@ int si_irq_set(struct radeon_device *rdev)
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
/* enable CP interrupts on all rings */ /* enable CP interrupts on all rings */
if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) { if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("si_irq_set: sw int gfx\n"); DRM_DEBUG("si_irq_set: sw int gfx\n");
cp_int_cntl |= TIME_STAMP_INT_ENABLE; cp_int_cntl |= TIME_STAMP_INT_ENABLE;
} }
if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) { if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
DRM_DEBUG("si_irq_set: sw int cp1\n"); DRM_DEBUG("si_irq_set: sw int cp1\n");
cp_int_cntl1 |= TIME_STAMP_INT_ENABLE; cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
} }
if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) { if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
DRM_DEBUG("si_irq_set: sw int cp2\n"); DRM_DEBUG("si_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE; cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
} }
if (rdev->irq.crtc_vblank_int[0] || if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) { atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("si_irq_set: vblank 0\n"); DRM_DEBUG("si_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK; crtc1 |= VBLANK_INT_MASK;
} }
if (rdev->irq.crtc_vblank_int[1] || if (rdev->irq.crtc_vblank_int[1] ||
rdev->irq.pflip[1]) { atomic_read(&rdev->irq.pflip[1])) {
DRM_DEBUG("si_irq_set: vblank 1\n"); DRM_DEBUG("si_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK; crtc2 |= VBLANK_INT_MASK;
} }
if (rdev->irq.crtc_vblank_int[2] || if (rdev->irq.crtc_vblank_int[2] ||
rdev->irq.pflip[2]) { atomic_read(&rdev->irq.pflip[2])) {
DRM_DEBUG("si_irq_set: vblank 2\n"); DRM_DEBUG("si_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK; crtc3 |= VBLANK_INT_MASK;
} }
if (rdev->irq.crtc_vblank_int[3] || if (rdev->irq.crtc_vblank_int[3] ||
rdev->irq.pflip[3]) { atomic_read(&rdev->irq.pflip[3])) {
DRM_DEBUG("si_irq_set: vblank 3\n"); DRM_DEBUG("si_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK; crtc4 |= VBLANK_INT_MASK;
} }
if (rdev->irq.crtc_vblank_int[4] || if (rdev->irq.crtc_vblank_int[4] ||
rdev->irq.pflip[4]) { atomic_read(&rdev->irq.pflip[4])) {
DRM_DEBUG("si_irq_set: vblank 4\n"); DRM_DEBUG("si_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK; crtc5 |= VBLANK_INT_MASK;
} }
if (rdev->irq.crtc_vblank_int[5] || if (rdev->irq.crtc_vblank_int[5] ||
rdev->irq.pflip[5]) { atomic_read(&rdev->irq.pflip[5])) {
DRM_DEBUG("si_irq_set: vblank 5\n"); DRM_DEBUG("si_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK; crtc6 |= VBLANK_INT_MASK;
} }
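The per-ring sw_int flags become reference counts (ring_int) that si_irq_set() reads with atomic_read(); the counting side can then limit hardware reprogramming to the transitions between zero and non-zero. A user-space model of that edge-triggered get/put follows; the names sw_irq_get, sw_irq_put and hw_irq_update are made up for the sketch and stand in for the radeon_irq_kms side of the change.

#include <stdatomic.h>
#include <stdio.h>

#define NUM_RINGS 3

static atomic_int ring_int[NUM_RINGS];

/* Stand-in for si_irq_set(): reprogram the CP interrupt enables. */
static void hw_irq_update(void)
{
    for (int i = 0; i < NUM_RINGS; i++)
        printf("ring %d irq %s\n", i,
               atomic_load(&ring_int[i]) ? "on" : "off");
}

/* Only touch the hardware on the 0 -> 1 and 1 -> 0 transitions. */
static void sw_irq_get(int ring)
{
    if (atomic_fetch_add(&ring_int[ring], 1) == 0)
        hw_irq_update();
}

static void sw_irq_put(int ring)
{
    if (atomic_fetch_sub(&ring_int[ring], 1) == 1)
        hw_irq_update();
}

int main(void)
{
    sw_irq_get(0);   /* first waiter enables the interrupt */
    sw_irq_get(0);   /* second waiter rides along for free */
    sw_irq_put(0);
    sw_irq_put(0);   /* last waiter disables it again */
    return 0;
}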
...@@ -3359,29 +3358,27 @@ int si_irq_process(struct radeon_device *rdev) ...@@ -3359,29 +3358,27 @@ int si_irq_process(struct radeon_device *rdev)
u32 rptr; u32 rptr;
u32 src_id, src_data, ring_id; u32 src_id, src_data, ring_id;
u32 ring_index; u32 ring_index;
unsigned long flags;
bool queue_hotplug = false; bool queue_hotplug = false;
if (!rdev->ih.enabled || rdev->shutdown) if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE; return IRQ_NONE;
wptr = si_get_ih_wptr(rdev); wptr = si_get_ih_wptr(rdev);
restart_ih:
/* is somebody else already processing irqs? */
if (atomic_xchg(&rdev->ih.lock, 1))
return IRQ_NONE;
rptr = rdev->ih.rptr; rptr = rdev->ih.rptr;
DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr); DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
spin_lock_irqsave(&rdev->ih.lock, flags);
if (rptr == wptr) {
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_NONE;
}
restart_ih:
/* Order reading of wptr vs. reading of IH ring data */ /* Order reading of wptr vs. reading of IH ring data */
rmb(); rmb();
/* display interrupts */ /* display interrupts */
si_irq_ack(rdev); si_irq_ack(rdev);
rdev->ih.wptr = wptr;
while (rptr != wptr) { while (rptr != wptr) {
/* wptr/rptr are in bytes! */ /* wptr/rptr are in bytes! */
ring_index = rptr / 4; ring_index = rptr / 4;
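The spinlock that used to cover the whole handler is replaced by an atomic_xchg() try-lock: whoever swaps the flag from 0 to 1 drains the ring, anyone else backs off immediately, and restart_ih now sits just before the swap so a re-run re-acquires the ownership. A compact C11 model of that single-owner guard (the flag and function names are invented for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int ih_lock;   /* 0 = free, 1 = someone is draining the ring */

/* Try to become the single IH processor; mirrors atomic_xchg(&ih.lock, 1). */
static bool ih_try_lock(void)
{
    return atomic_exchange(&ih_lock, 1) == 0;
}

static void ih_unlock(void)
{
    atomic_store(&ih_lock, 0);
}

int main(void)
{
    if (!ih_try_lock()) {
        puts("somebody else is already processing irqs");
        return 0;                     /* the real handler returns IRQ_NONE */
    }
    puts("draining the IH ring");     /* ... the while (rptr != wptr) loop */
    ih_unlock();
    return 0;
}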
...@@ -3399,7 +3396,7 @@ int si_irq_process(struct radeon_device *rdev) ...@@ -3399,7 +3396,7 @@ int si_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true; rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue); wake_up(&rdev->irq.vblank_queue);
} }
if (rdev->irq.pflip[0]) if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0); radeon_crtc_handle_flip(rdev, 0);
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n"); DRM_DEBUG("IH: D1 vblank\n");
...@@ -3425,7 +3422,7 @@ int si_irq_process(struct radeon_device *rdev) ...@@ -3425,7 +3422,7 @@ int si_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true; rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue); wake_up(&rdev->irq.vblank_queue);
} }
if (rdev->irq.pflip[1]) if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1); radeon_crtc_handle_flip(rdev, 1);
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n"); DRM_DEBUG("IH: D2 vblank\n");
...@@ -3451,7 +3448,7 @@ int si_irq_process(struct radeon_device *rdev) ...@@ -3451,7 +3448,7 @@ int si_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true; rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue); wake_up(&rdev->irq.vblank_queue);
} }
if (rdev->irq.pflip[2]) if (atomic_read(&rdev->irq.pflip[2]))
radeon_crtc_handle_flip(rdev, 2); radeon_crtc_handle_flip(rdev, 2);
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n"); DRM_DEBUG("IH: D3 vblank\n");
...@@ -3477,7 +3474,7 @@ int si_irq_process(struct radeon_device *rdev) ...@@ -3477,7 +3474,7 @@ int si_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true; rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue); wake_up(&rdev->irq.vblank_queue);
} }
if (rdev->irq.pflip[3]) if (atomic_read(&rdev->irq.pflip[3]))
radeon_crtc_handle_flip(rdev, 3); radeon_crtc_handle_flip(rdev, 3);
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n"); DRM_DEBUG("IH: D4 vblank\n");
...@@ -3503,7 +3500,7 @@ int si_irq_process(struct radeon_device *rdev) ...@@ -3503,7 +3500,7 @@ int si_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true; rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue); wake_up(&rdev->irq.vblank_queue);
} }
if (rdev->irq.pflip[4]) if (atomic_read(&rdev->irq.pflip[4]))
radeon_crtc_handle_flip(rdev, 4); radeon_crtc_handle_flip(rdev, 4);
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n"); DRM_DEBUG("IH: D5 vblank\n");
...@@ -3529,7 +3526,7 @@ int si_irq_process(struct radeon_device *rdev) ...@@ -3529,7 +3526,7 @@ int si_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true; rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue); wake_up(&rdev->irq.vblank_queue);
} }
if (rdev->irq.pflip[5]) if (atomic_read(&rdev->irq.pflip[5]))
radeon_crtc_handle_flip(rdev, 5); radeon_crtc_handle_flip(rdev, 5);
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n"); DRM_DEBUG("IH: D6 vblank\n");
...@@ -3620,7 +3617,6 @@ int si_irq_process(struct radeon_device *rdev) ...@@ -3620,7 +3617,6 @@ int si_irq_process(struct radeon_device *rdev)
break; break;
case 233: /* GUI IDLE */ case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n"); DRM_DEBUG("IH: GUI idle\n");
rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue); wake_up(&rdev->irq.idle_queue);
break; break;
default: default:
...@@ -3632,15 +3628,17 @@ int si_irq_process(struct radeon_device *rdev) ...@@ -3632,15 +3628,17 @@ int si_irq_process(struct radeon_device *rdev)
rptr += 16; rptr += 16;
rptr &= rdev->ih.ptr_mask; rptr &= rdev->ih.ptr_mask;
} }
/* make sure wptr hasn't changed while processing */
wptr = si_get_ih_wptr(rdev);
if (wptr != rdev->ih.wptr)
goto restart_ih;
if (queue_hotplug) if (queue_hotplug)
schedule_work(&rdev->hotplug_work); schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr; rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr); WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags); atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
wptr = si_get_ih_wptr(rdev);
if (wptr != rptr)
goto restart_ih;
return IRQ_HANDLED; return IRQ_HANDLED;
} }
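Writing rptr back and dropping the lock before re-reading the write pointer is what closes the race: if the hardware advanced wptr while the ring was being drained, the handler loops back to restart_ih instead of leaving entries stranded. A stand-alone sketch of that drain-and-recheck shape follows; hardware accesses are faked with plain variables and all names are illustrative.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PTR_MASK 0xffu

static uint32_t hw_wptr;                  /* pretend register written by "hw" */
static uint32_t hw_rptr;                  /* pretend register written by us   */
static atomic_int ih_lock;

static uint32_t get_ih_wptr(void) { return hw_wptr & PTR_MASK; }

static void irq_process(void)
{
    uint32_t wptr = get_ih_wptr();
    uint32_t rptr;

restart_ih:
    if (atomic_exchange(&ih_lock, 1))
        return;                           /* someone else owns the ring */
    rptr = hw_rptr;
    while (rptr != wptr) {
        printf("handle entry at %u\n", rptr);
        rptr = (rptr + 16) & PTR_MASK;    /* ring entries are 16 bytes */
    }
    hw_rptr = rptr;                       /* tell the "hardware" how far we got */
    atomic_store(&ih_lock, 0);
    wptr = get_ih_wptr();                 /* did more work arrive meanwhile? */
    if (wptr != rptr)
        goto restart_ih;
}

int main(void)
{
    hw_wptr = 32;                         /* two pending 16-byte entries */
    irq_process();
    return 0;
}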
......