Commit b401796c authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "This is radeon and intel fixes, and is a small bit larger than I'm
  guessing you'd like it to be.

   - i915: fixes 32-bit highmem i915 blank screen, semaphore hang and
     runtime pm fix

   - radeon: gpuvm stability fix for hangs since 3.15, and hang/reboot
     regression on TN/RL devices.

  The only slightly controversial one is the change to use GB for the
  vm_size, which I'm letting through as it's a new interface we defined
  in this merge window, and I'd prefer to have the released kernel have
  the final interface rather than changing it later"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: fix cut and paste issue for hawaii.
  drm/radeon: fix irq ring buffer overflow handling
  drm/i915: Simplify i915_gem_release_all_mmaps()
  drm/radeon: fix error handling in radeon_vm_bo_set_addr
  drm/i915: fix freeze with blank screen booting highmem
  drm/i915: Reorder the semaphore deadlock check, again
  drm/radeon/TN: only enable bapm on MSI systems
  drm/radeon: fix VM IB handling
  drm/radeon: fix handling of radeon_vm_bo_rmv v3
  drm/radeon: let's use GB for vm_size (v2)
parents 9c550218 1b2c4869
...@@ -1616,22 +1616,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1616,22 +1616,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return ret; return ret;
} }
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
struct i915_vma *vma;
/*
* Only the global gtt is relevant for gtt memory mappings, so restrict
* list traversal to objects bound into the global address space. Note
* that the active list should be empty, but better safe than sorry.
*/
WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
i915_gem_release_mmap(vma->obj);
list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
i915_gem_release_mmap(vma->obj);
}
/** /**
* i915_gem_release_mmap - remove physical page mappings * i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question * @obj: obj in question
...@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) ...@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false; obj->fault_mappable = false;
} }
void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
i915_gem_release_mmap(obj);
}
uint32_t uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{ {
......
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
struct i915_render_state { struct i915_render_state {
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
unsigned long ggtt_offset; unsigned long ggtt_offset;
void *batch; u32 *batch;
u32 size; u32 size;
u32 len; u32 len;
}; };
...@@ -80,7 +80,7 @@ static struct i915_render_state *render_state_alloc(struct drm_device *dev) ...@@ -80,7 +80,7 @@ static struct i915_render_state *render_state_alloc(struct drm_device *dev)
static void render_state_free(struct i915_render_state *so) static void render_state_free(struct i915_render_state *so)
{ {
kunmap(so->batch); kunmap(kmap_to_page(so->batch));
i915_gem_object_ggtt_unpin(so->obj); i915_gem_object_ggtt_unpin(so->obj);
drm_gem_object_unreference(&so->obj->base); drm_gem_object_unreference(&so->obj->base);
kfree(so); kfree(so);
......
...@@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring) ...@@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
{ {
struct drm_i915_private *dev_priv = ring->dev->dev_private; struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller; struct intel_engine_cs *signaller;
u32 seqno, ctl; u32 seqno;
ring->hangcheck.deadlock++; ring->hangcheck.deadlock++;
...@@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring) ...@@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
return -1; return -1;
/* cursory check for an unkickable deadlock */
ctl = I915_READ_CTL(signaller);
if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
return -1;
if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
return 1; return 1;
if (signaller->hangcheck.deadlock) /* cursory check for an unkickable deadlock */
if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
semaphore_passed(signaller) < 0)
return -1; return -1;
return 0; return 0;
......
...@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) ...@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0; gb_tile_moden = 0;
break; break;
} }
rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
} }
} else if (num_pipe_configs == 8) { } else if (num_pipe_configs == 8) {
...@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev) ...@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL); tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR; tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp); WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
} }
return (wptr & rdev->ih.ptr_mask); return (wptr & rdev->ih.ptr_mask);
} }
......
...@@ -4756,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev) ...@@ -4756,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL); tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR; tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp); WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
} }
return (wptr & rdev->ih.ptr_mask); return (wptr & rdev->ih.ptr_mask);
} }
......
...@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev) ...@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL); tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR; tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp); WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
} }
return (wptr & rdev->ih.ptr_mask); return (wptr & rdev->ih.ptr_mask);
} }
......
...@@ -449,6 +449,7 @@ struct radeon_bo_va { ...@@ -449,6 +449,7 @@ struct radeon_bo_va {
/* protected by vm mutex */ /* protected by vm mutex */
struct list_head vm_list; struct list_head vm_list;
struct list_head vm_status;
/* constant after initialization */ /* constant after initialization */
struct radeon_vm *vm; struct radeon_vm *vm;
...@@ -867,6 +868,9 @@ struct radeon_vm { ...@@ -867,6 +868,9 @@ struct radeon_vm {
struct list_head va; struct list_head va;
unsigned id; unsigned id;
/* BOs freed, but not yet updated in the PT */
struct list_head freed;
/* contains the page directory */ /* contains the page directory */
struct radeon_bo *page_directory; struct radeon_bo *page_directory;
uint64_t pd_gpu_addr; uint64_t pd_gpu_addr;
...@@ -875,6 +879,8 @@ struct radeon_vm { ...@@ -875,6 +879,8 @@ struct radeon_vm {
/* array of page tables, one for each page directory entry */ /* array of page tables, one for each page directory entry */
struct radeon_vm_pt *page_tables; struct radeon_vm_pt *page_tables;
struct radeon_bo_va *ib_bo_va;
struct mutex mutex; struct mutex mutex;
/* last fence for cs using this vm */ /* last fence for cs using this vm */
struct radeon_fence *fence; struct radeon_fence *fence;
...@@ -2832,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev, ...@@ -2832,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_update_page_directory(struct radeon_device *rdev, int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm); struct radeon_vm *vm);
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev, int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_vm *vm, struct radeon_bo_va *bo_va,
struct radeon_bo *bo,
struct ttm_mem_reg *mem); struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev, void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo); struct radeon_bo *bo);
...@@ -2847,7 +2854,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, ...@@ -2847,7 +2854,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va, struct radeon_bo_va *bo_va,
uint64_t offset, uint64_t offset,
uint32_t flags); uint32_t flags);
int radeon_vm_bo_rmv(struct radeon_device *rdev, void radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va); struct radeon_bo_va *bo_va);
/* audio */ /* audio */
......
...@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, ...@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
struct radeon_vm *vm) struct radeon_vm *vm)
{ {
struct radeon_device *rdev = p->rdev; struct radeon_device *rdev = p->rdev;
struct radeon_bo_va *bo_va;
int i, r; int i, r;
r = radeon_vm_update_page_directory(rdev, vm); r = radeon_vm_update_page_directory(rdev, vm);
if (r) if (r)
return r; return r;
r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, r = radeon_vm_clear_freed(rdev, vm);
if (r)
return r;
if (vm->ib_bo_va == NULL) {
DRM_ERROR("Tmp BO not in VM!\n");
return -EINVAL;
}
r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
&rdev->ring_tmp_bo.bo->tbo.mem); &rdev->ring_tmp_bo.bo->tbo.mem);
if (r) if (r)
return r; return r;
...@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, ...@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
continue; continue;
bo = p->relocs[i].robj; bo = p->relocs[i].robj;
r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem); bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
return -EINVAL;
}
r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r) if (r)
return r; return r;
} }
......
...@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev) ...@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
if (!radeon_check_pot_argument(radeon_vm_size)) { if (!radeon_check_pot_argument(radeon_vm_size)) {
dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n", dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
radeon_vm_size); radeon_vm_size);
radeon_vm_size = 4096; radeon_vm_size = 4;
} }
if (radeon_vm_size < 4) { if (radeon_vm_size < 1) {
dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n", dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
radeon_vm_size); radeon_vm_size);
radeon_vm_size = 4096; radeon_vm_size = 4;
} }
/* /*
* Max GPUVM size for Cayman, SI and CI are 40 bits. * Max GPUVM size for Cayman, SI and CI are 40 bits.
*/ */
if (radeon_vm_size > 1024*1024) { if (radeon_vm_size > 1024) {
dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n", dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
radeon_vm_size); radeon_vm_size);
radeon_vm_size = 4096; radeon_vm_size = 4;
} }
/* defines number of bits in page table versus page directory, /* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */ * page table and the remaining bits are in the page directory */
if (radeon_vm_block_size < 9) { if (radeon_vm_block_size < 9) {
dev_warn(rdev->dev, "VM page table size (%d) to small\n", dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size); radeon_vm_block_size);
radeon_vm_block_size = 9; radeon_vm_block_size = 9;
} }
if (radeon_vm_block_size > 24 || if (radeon_vm_block_size > 24 ||
radeon_vm_size < (1ull << radeon_vm_block_size)) { (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
dev_warn(rdev->dev, "VM page table size (%d) to large\n", dev_warn(rdev->dev, "VM page table size (%d) too large\n",
radeon_vm_block_size); radeon_vm_block_size);
radeon_vm_block_size = 9; radeon_vm_block_size = 9;
} }
...@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev, ...@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Adjust VM size here. /* Adjust VM size here.
* Max GPUVM size for cayman+ is 40 bits. * Max GPUVM size for cayman+ is 40 bits.
*/ */
rdev->vm_manager.max_pfn = radeon_vm_size << 8; rdev->vm_manager.max_pfn = radeon_vm_size << 18;
/* Set asic functions */ /* Set asic functions */
r = radeon_asic_init(rdev); r = radeon_asic_init(rdev);
......
...@@ -173,7 +173,7 @@ int radeon_dpm = -1; ...@@ -173,7 +173,7 @@ int radeon_dpm = -1;
int radeon_aspm = -1; int radeon_aspm = -1;
int radeon_runtime_pm = -1; int radeon_runtime_pm = -1;
int radeon_hard_reset = 0; int radeon_hard_reset = 0;
int radeon_vm_size = 4096; int radeon_vm_size = 4;
int radeon_vm_block_size = 9; int radeon_vm_block_size = 9;
int radeon_deep_color = 0; int radeon_deep_color = 0;
...@@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444); ...@@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
module_param_named(hard_reset, radeon_hard_reset, int, 0444); module_param_named(hard_reset, radeon_hard_reset, int, 0444);
MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)"); MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
module_param_named(vm_size, radeon_vm_size, int, 0444); module_param_named(vm_size, radeon_vm_size, int, 0444);
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)"); MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
......
...@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) ...@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */ /* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) { if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv; struct radeon_fpriv *fpriv;
struct radeon_bo_va *bo_va; struct radeon_vm *vm;
int r; int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
...@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) ...@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM; return -ENOMEM;
} }
r = radeon_vm_init(rdev, &fpriv->vm); vm = &fpriv->vm;
r = radeon_vm_init(rdev, vm);
if (r) { if (r) {
kfree(fpriv); kfree(fpriv);
return r; return r;
...@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) ...@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (rdev->accel_working) { if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r) { if (r) {
radeon_vm_fini(rdev, &fpriv->vm); radeon_vm_fini(rdev, vm);
kfree(fpriv); kfree(fpriv);
return r; return r;
} }
/* map the ib pool buffer read only into /* map the ib pool buffer read only into
* virtual address space */ * virtual address space */
bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
rdev->ring_tmp_bo.bo); rdev->ring_tmp_bo.bo);
r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED); RADEON_VM_PAGE_SNOOPED);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo); radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) { if (r) {
radeon_vm_fini(rdev, &fpriv->vm); radeon_vm_fini(rdev, vm);
kfree(fpriv); kfree(fpriv);
return r; return r;
} }
...@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev, ...@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/* new gpu have virtual address space support */ /* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
struct radeon_fpriv *fpriv = file_priv->driver_priv; struct radeon_fpriv *fpriv = file_priv->driver_priv;
struct radeon_bo_va *bo_va; struct radeon_vm *vm = &fpriv->vm;
int r; int r;
if (rdev->accel_working) { if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) { if (!r) {
bo_va = radeon_vm_bo_find(&fpriv->vm, if (vm->ib_bo_va)
rdev->ring_tmp_bo.bo); radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
if (bo_va)
radeon_vm_bo_rmv(rdev, bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo); radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
} }
} }
radeon_vm_fini(rdev, &fpriv->vm); radeon_vm_fini(rdev, vm);
kfree(fpriv); kfree(fpriv);
file_priv->driver_priv = NULL; file_priv->driver_priv = NULL;
} }
......
...@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, ...@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
bo_va->ref_count = 1; bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list); INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list); INIT_LIST_HEAD(&bo_va->vm_list);
INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex); mutex_lock(&vm->mutex);
list_add(&bo_va->vm_list, &vm->va); list_add(&bo_va->vm_list, &vm->va);
...@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, ...@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
head = &tmp->vm_list; head = &tmp->vm_list;
} }
if (bo_va->soffset) {
/* add a clone of the bo_va to clear the old address */
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (!tmp) {
mutex_unlock(&vm->mutex);
return -ENOMEM;
}
tmp->soffset = bo_va->soffset;
tmp->eoffset = bo_va->eoffset;
tmp->vm = vm;
list_add(&tmp->vm_status, &vm->freed);
}
bo_va->soffset = soffset; bo_va->soffset = soffset;
bo_va->eoffset = eoffset; bo_va->eoffset = eoffset;
bo_va->flags = flags; bo_va->flags = flags;
...@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, ...@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
* Object have to be reserved and mutex must be locked! * Object have to be reserved and mutex must be locked!
*/ */
int radeon_vm_bo_update(struct radeon_device *rdev, int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_vm *vm, struct radeon_bo_va *bo_va,
struct radeon_bo *bo,
struct ttm_mem_reg *mem) struct ttm_mem_reg *mem)
{ {
struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib; struct radeon_ib ib;
struct radeon_bo_va *bo_va;
unsigned nptes, ndw; unsigned nptes, ndw;
uint64_t addr; uint64_t addr;
int r; int r;
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
return -EINVAL;
}
if (!bo_va->soffset) { if (!bo_va->soffset) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
bo, vm); bo_va->bo, vm);
return -EINVAL; return -EINVAL;
} }
...@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev, ...@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
trace_radeon_vm_bo_update(bo_va); trace_radeon_vm_bo_update(bo_va);
nptes = radeon_bo_ngpu_pages(bo); nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
/* padding, etc. */ /* padding, etc. */
ndw = 64; ndw = 64;
...@@ -910,6 +918,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev, ...@@ -910,6 +918,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
return 0; return 0;
} }
/**
* radeon_vm_clear_freed - clear freed BOs in the PT
*
* @rdev: radeon_device pointer
* @vm: requested vm
*
* Make sure all freed BOs are cleared in the PT.
* Returns 0 for success.
*
* PTs have to be reserved and mutex must be locked!
*/
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va, *tmp;
int r;
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
list_del(&bo_va->vm_status);
r = radeon_vm_bo_update(rdev, bo_va, NULL);
kfree(bo_va);
if (r)
return r;
}
return 0;
}
/** /**
* radeon_vm_bo_rmv - remove a bo to a specific vm * radeon_vm_bo_rmv - remove a bo to a specific vm
* *
...@@ -917,27 +953,27 @@ int radeon_vm_bo_update(struct radeon_device *rdev, ...@@ -917,27 +953,27 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
* @bo_va: requested bo_va * @bo_va: requested bo_va
* *
* Remove @bo_va->bo from the requested vm (cayman+). * Remove @bo_va->bo from the requested vm (cayman+).
* Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
* remove the ptes for @bo_va in the page table.
* Returns 0 for success.
* *
* Object have to be reserved! * Object have to be reserved!
*/ */
int radeon_vm_bo_rmv(struct radeon_device *rdev, void radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va) struct radeon_bo_va *bo_va)
{ {
int r = 0; struct radeon_vm *vm = bo_va->vm;
mutex_lock(&bo_va->vm->mutex); list_del(&bo_va->bo_list);
if (bo_va->soffset)
r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
mutex_lock(&vm->mutex);
list_del(&bo_va->vm_list); list_del(&bo_va->vm_list);
mutex_unlock(&bo_va->vm->mutex);
list_del(&bo_va->bo_list);
if (bo_va->soffset) {
bo_va->bo = NULL;
list_add(&bo_va->vm_status, &vm->freed);
} else {
kfree(bo_va); kfree(bo_va);
return r; }
mutex_unlock(&vm->mutex);
} }
/** /**
...@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) ...@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
int r; int r;
vm->id = 0; vm->id = 0;
vm->ib_bo_va = NULL;
vm->fence = NULL; vm->fence = NULL;
vm->last_flush = NULL; vm->last_flush = NULL;
vm->last_id_use = NULL; vm->last_id_use = NULL;
mutex_init(&vm->mutex); mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->va); INIT_LIST_HEAD(&vm->va);
INIT_LIST_HEAD(&vm->freed);
pd_size = radeon_vm_directory_size(rdev); pd_size = radeon_vm_directory_size(rdev);
pd_entries = radeon_vm_num_pdes(rdev); pd_entries = radeon_vm_num_pdes(rdev);
...@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) ...@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
kfree(bo_va); kfree(bo_va);
} }
} }
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
kfree(bo_va);
for (i = 0; i < radeon_vm_num_pdes(rdev); i++) for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_bo_unref(&vm->page_tables[i].bo); radeon_bo_unref(&vm->page_tables[i].bo);
......
...@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev) ...@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL); tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR; tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp); WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
} }
return (wptr & rdev->ih.ptr_mask); return (wptr & rdev->ih.ptr_mask);
} }
......
...@@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev) ...@@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT; pi->at[i] = TRINITY_AT_DFLT;
/* There are stability issues reported on latops with /* There are stability issues reported on with
* bapm installed when switching between AC and battery * bapm enabled when switching between AC and battery
* power. At the same time, some desktop boards hang * power. At the same time, some MSI boards hang
* if it's not enabled and dpm is enabled. * if it's not enabled and dpm is enabled. Just enable
* it for MSI boards right now.
*/ */
if (rdev->flags & RADEON_IS_MOBILITY) if (rdev->pdev->subsystem_vendor == 0x1462)
pi->enable_bapm = false;
else
pi->enable_bapm = true; pi->enable_bapm = true;
else
pi->enable_bapm = false;
pi->enable_nbps_policy = true; pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true; pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true; pi->enable_gfx_power_gating = true;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment