Commit bfaae47d authored by Maarten Lankhorst, committed by Daniel Vetter

drm/i915: make lockdep slightly happier about execbuf.

As soon as we install fences, we should stop allocating memory
in order to prevent any potential deadlocks.

This is required later on, when we start adding support for
dma-fence annotations.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-11-maarten.lankhorst@linux.intel.com
parent a85fffe3
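The deadlock the message alludes to is the one the dma-fence annotations are meant to catch: once a fence has been published, direct reclaim (fs_reclaim) may end up waiting on that very fence through shrinkers and eviction, so a GFP_KERNEL allocation made after that point can close a dependency cycle. A minimal sketch of the rule, not taken from this patch (the demo_job structure and helper name are made up):

#include <linux/dma-fence.h>
#include <linux/slab.h>

struct demo_job {			/* hypothetical container, for illustration only */
	struct dma_fence *fence;
	void *scratch;
};

static int demo_submit(struct demo_job *job)
{
	bool cookie;

	/* Allocations are still fine here: no fence is visible to anyone yet. */
	job->scratch = kzalloc(4096, GFP_KERNEL);
	if (!job->scratch)
		return -ENOMEM;

	/* From here on we are inside the fence-signalling critical section. */
	cookie = dma_fence_begin_signalling();

	/*
	 * A kmalloc(GFP_KERNEL) here would be flagged by lockdep once the
	 * annotations are in place: reclaim can block on the fence we have
	 * just promised to signal. Only non-reclaiming allocations
	 * (GFP_ATOMIC/GFP_NOWAIT), or none at all, are safe now.
	 */

	dma_fence_end_signalling(cookie);
	return 0;
}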
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -50,11 +50,12 @@ enum {
 #define DBG_FORCE_RELOC 0 /* choose one of the above! */
 };
 
-#define __EXEC_OBJECT_HAS_PIN		BIT(31)
-#define __EXEC_OBJECT_HAS_FENCE		BIT(30)
-#define __EXEC_OBJECT_NEEDS_MAP		BIT(29)
-#define __EXEC_OBJECT_NEEDS_BIAS	BIT(28)
-#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 28) /* all of the above */
+/* __EXEC_OBJECT_NO_RESERVE is BIT(31), defined in i915_vma.h */
+#define __EXEC_OBJECT_HAS_PIN		BIT(30)
+#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
+#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
+#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
+#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above + */
 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
 
 #define __EXEC_HAS_RELOC	BIT(31)
@@ -935,6 +936,12 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
 			}
 		}
 
+		if (!(ev->flags & EXEC_OBJECT_WRITE)) {
+			err = dma_resv_reserve_shared(vma->resv, 1);
+			if (err)
+				return err;
+		}
+
 		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
 			   eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
 	}
@@ -2202,7 +2209,8 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 		}
 
 		if (err == 0)
-			err = i915_vma_move_to_active(vma, eb->request, flags);
+			err = i915_vma_move_to_active(vma, eb->request,
+						      flags | __EXEC_OBJECT_NO_RESERVE);
 	}
 
 	if (unlikely(err))
@@ -2454,6 +2462,10 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 	if (err)
 		goto err_commit;
 
+	err = dma_resv_reserve_shared(shadow->resv, 1);
+	if (err)
+		goto err_commit;
+
 	/* Wait for all writes (and relocs) into the batch to complete */
 	err = i915_sw_fence_await_reservation(&pw->base.chain,
 					      pw->batch->resv, NULL, false,
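Taken together, the execbuffer hunks split the reservation-object work into two phases: eb_validate_vmas() and eb_parse_pipeline() call dma_resv_reserve_shared() while allocating memory is still allowed, and eb_move_to_gpu() then passes __EXEC_OBJECT_NO_RESERVE so that i915_vma_move_to_active() only consumes a slot that was reserved earlier. A generic sketch of that reserve-early/add-late pattern, illustrative only (the demo_* helpers are made up, and the caller is assumed to hold the dma_resv lock):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Phase 1: before any fence for this work exists; allocating is still safe. */
static int demo_reserve_slot(struct dma_resv *resv)
{
	dma_resv_assert_held(resv);
	/* May allocate the shared-fence array, hence must happen early. */
	return dma_resv_reserve_shared(resv, 1);
}

/* Phase 2: after the request's fence has been published. */
static void demo_attach_fence(struct dma_resv *resv, struct dma_fence *fence)
{
	dma_resv_assert_held(resv);
	/* Consumes the slot reserved in phase 1; no memory is allocated here. */
	dma_resv_add_shared_fence(resv, fence);
}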
drivers/gpu/drm/i915/i915_active.c
@@ -293,18 +293,13 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
 static struct i915_active_fence *
 active_instance(struct i915_active *ref, u64 idx)
 {
-	struct active_node *node, *prealloc;
+	struct active_node *node;
 	struct rb_node **p, *parent;
 
 	node = __active_lookup(ref, idx);
 	if (likely(node))
 		return &node->base;
 
-	/* Preallocate a replacement, just in case */
-	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
-	if (!prealloc)
-		return NULL;
-
 	spin_lock_irq(&ref->tree_lock);
 	GEM_BUG_ON(i915_active_is_idle(ref));
@@ -314,10 +309,8 @@ active_instance(struct i915_active *ref, u64 idx)
 		parent = *p;
 
 		node = rb_entry(parent, struct active_node, node);
-		if (node->timeline == idx) {
-			kmem_cache_free(global.slab_cache, prealloc);
+		if (node->timeline == idx)
 			goto out;
-		}
 
 		if (node->timeline < idx)
 			p = &parent->rb_right;
@@ -325,7 +318,14 @@ active_instance(struct i915_active *ref, u64 idx)
 			p = &parent->rb_left;
 	}
 
-	node = prealloc;
+	/*
+	 * XXX: We should preallocate this before i915_active_ref() is ever
+	 * called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
+	 */
+	node = kmem_cache_alloc(global.slab_cache, GFP_ATOMIC);
+	if (!node)
+		goto out;
+
 	__i915_active_fence_init(&node->base, NULL, node_retire);
 	node->ref = ref;
 	node->timeline = idx;
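The i915_active.c change follows the same rule: the tree node used to be preallocated with GFP_KERNEL before taking ref->tree_lock, but this path must not enter fs_reclaim at all, so the allocation moves under the lock and switches to GFP_ATOMIC, which never reclaims (the price being that it can now fail and callers have to cope). A short sketch of how lockdep models the difference, not part of the patch:

#include <linux/gfp.h>
#include <linux/sched/mm.h>

/*
 * Illustrative only: fs_reclaim_acquire() records a dependency on the
 * reclaim pseudo-lock for masks that allow direct reclaim and is a no-op
 * for GFP_ATOMIC. That is why the late GFP_ATOMIC allocation in
 * active_instance() stays out of the publish-fence -> reclaim -> wait-on-fence
 * cycle.
 */
static void demo_reclaim_dependencies(void)
{
	fs_reclaim_acquire(GFP_KERNEL);	/* dependency recorded: may reclaim */
	fs_reclaim_release(GFP_KERNEL);

	fs_reclaim_acquire(GFP_ATOMIC);	/* nothing recorded: never reclaims */
	fs_reclaim_release(GFP_ATOMIC);
}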
drivers/gpu/drm/i915/i915_vma.c
@@ -1247,9 +1247,11 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 		obj->write_domain = I915_GEM_DOMAIN_RENDER;
 		obj->read_domains = 0;
 	} else {
-		err = dma_resv_reserve_shared(vma->resv, 1);
-		if (unlikely(err))
-			return err;
+		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
+			err = dma_resv_reserve_shared(vma->resv, 1);
+			if (unlikely(err))
+				return err;
+		}
 
 		dma_resv_add_shared_fence(vma->resv, &rq->fence);
 		obj->write_domain = 0;
drivers/gpu/drm/i915/i915_vma.h
@@ -52,6 +52,9 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma)
 	return !i915_active_is_idle(&vma->active);
 }
 
+/* do not reserve memory to prevent deadlocks */
+#define __EXEC_OBJECT_NO_RESERVE BIT(31)
+
 int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
 					   struct i915_request *rq);
 int __must_check i915_vma_move_to_active(struct i915_vma *vma,