Commit b7ffd9d9 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2022-11-10' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Fix sg_table handling in map_dma_buf (Matthew Auld)
- Send PSR update also on invalidate (Jouni Högander)
- Do not set cache_dirty for DGFX (Niranjana Vishwanathapura)
- Restore userptr probe_range behaviour (Matthew Auld)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Y2zCy5q85qE9W0J8@tursulin-desk
parents 42bf3ce7 178e31ce
@@ -2201,8 +2201,11 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
 		u32 val;
 
-		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
+		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
+			/* Send one update otherwise lag is observed in screen */
+			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
 			return;
+		}
 
 		val = man_trk_ctl_enable_bit_get(dev_priv) |
 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
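A note on the fix above: with continuous full fetch (CFF) enabled the handler used to return without programming anything, leaving the panel one frame behind the invalidate. Writing CURSURFLIVE is the idiom i915 already uses elsewhere to make the hardware send a single PSR2 update. A minimal sketch of that idiom, reusing dev_priv and intel_dp from the hunk (the helper name is hypothetical):

	/* Hypothetical wrapper around the idiom used in the fix above. */
	static void psr2_send_one_update(struct drm_i915_private *dev_priv,
					 struct intel_dp *intel_dp)
	{
		/* Send one update otherwise lag is observed in screen */
		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
	}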
@@ -40,13 +40,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 		goto err;
 	}
 
-	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+	ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL);
 	if (ret)
 		goto err_free;
 
 	src = obj->mm.pages->sgl;
 	dst = st->sgl;
-	for (i = 0; i < obj->mm.pages->nents; i++) {
+	for (i = 0; i < obj->mm.pages->orig_nents; i++) {
 		sg_set_page(dst, sg_page(src), src->length, 0);
 		dst = sg_next(dst);
 		src = sg_next(src);
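Background for the change: in a struct sg_table, orig_nents counts the CPU-side entries the table was allocated with, while nents counts DMA-mapped segments, which dma_map_sg() may reduce by coalescing. A CPU-side page copy must therefore allocate and walk by orig_nents, which is exactly what the hunk switches to. A minimal sketch of the pattern, assuming only <linux/scatterlist.h> (clone_sg_pages is a hypothetical name, not an i915 function):

	#include <linux/scatterlist.h>

	/*
	 * Clone the page layout of one sg_table into another. Sizing by
	 * orig_nents is the point: nents may be smaller after dma_map_sg()
	 * coalesces segments, and a CPU walk would then miss entries.
	 */
	static int clone_sg_pages(struct sg_table *dst, struct sg_table *src_tbl)
	{
		struct scatterlist *src, *dst_sg;
		unsigned int i;
		int ret;

		ret = sg_alloc_table(dst, src_tbl->orig_nents, GFP_KERNEL);
		if (ret)
			return ret;

		dst_sg = dst->sgl;
		/* for_each_sgtable_sg() iterates all orig_nents CPU entries */
		for_each_sgtable_sg(src_tbl, src, i) {
			sg_set_page(dst_sg, sg_page(src), src->length, 0);
			dst_sg = sg_next(dst_sg);
		}
		return 0;
	}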
@@ -369,14 +369,14 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 
 	__start_cpu_write(obj);
 	/*
-	 * On non-LLC platforms, force the flush-on-acquire if this is ever
+	 * On non-LLC igfx platforms, force the flush-on-acquire if this is ever
 	 * swapped-in. Our async flush path is not trust worthy enough yet(and
 	 * happens in the wrong order), and with some tricks it's conceivable
 	 * for userspace to change the cache-level to I915_CACHE_NONE after the
 	 * pages are swapped-in, and since execbuf binds the object before doing
 	 * the async flush, we have a race window.
 	 */
-	if (!HAS_LLC(i915))
+	if (!HAS_LLC(i915) && !IS_DGFX(i915))
 		obj->cache_dirty = true;
 }
 
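Rationale for the added IS_DGFX() check: cache_dirty forces a clflush when the pages are next acquired, which only matters for integrated (igfx) parts whose buffers sit in CPU-cached system memory without LLC snooping; on discrete cards the objects are always coherent, so the flush is never needed. A sketch of the resulting predicate (the helper is hypothetical; HAS_LLC and IS_DGFX are the real i915 macros used in the hunk):

	/* Hypothetical predicate capturing the condition the fix installs. */
	static bool needs_flush_on_acquire(struct drm_i915_private *i915)
	{
		/*
		 * LLC parts snoop CPU caches, and discrete (DGFX) objects are
		 * always coherent, so only non-LLC integrated parts qualify.
		 */
		return !HAS_LLC(i915) && !IS_DGFX(i915);
	}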
@@ -428,9 +428,10 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
 {
 	VMA_ITERATOR(vmi, mm, addr);
 	struct vm_area_struct *vma;
+	unsigned long end = addr + len;
 
 	mmap_read_lock(mm);
-	for_each_vma_range(vmi, vma, addr + len) {
+	for_each_vma_range(vmi, vma, end) {
 		/* Check for holes, note that we also update the addr below */
 		if (vma->vm_start > addr)
 			break;
@@ -442,7 +443,7 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
 	}
 	mmap_read_unlock(mm);
-	if (vma)
+	if (vma || addr < end)
 		return -EFAULT;
 
 	return 0;
 }
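The subtlety the second hunk restores: the VMA iterator can run out of VMAs (vma == NULL) while a hole still remains between the last mapping and end, so checking vma alone misses trailing holes; addr, which the loop advances past each mapping, exposes them. A self-contained sketch of the same hole-detection logic over a sorted array of ranges (plain C, hypothetical types, no kernel dependencies):

	#include <stddef.h>

	struct range { unsigned long start, end; };	/* [start, end) */

	/* Return 0 iff [addr, addr + len) is fully covered by r[0..n). */
	static int probe_ranges(const struct range *r, size_t n,
				unsigned long addr, unsigned long len)
	{
		unsigned long end = addr + len;
		size_t i;

		for (i = 0; i < n && r[i].start < end; i++) {
			if (r[i].start > addr)
				return -1;	/* hole before this range */
			addr = r[i].end;	/* advance past the covered part */
		}
		/* Trailing hole: ranges ran out but addr never reached end */
		return addr < end ? -1 : 0;
	}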