Commit 0eafec6d authored by Chris Wilson

drm/i915: Enable lockless lookup of request tracking via RCU

If we enable RCU for the requests (providing a grace period where we can
inspect a "dead" request before it is freed), we can allow callers to
carefully perform lockless lookup of an active request.
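
As an illustration only (example_is_busy() is a hypothetical caller, not
part of this patch), the i915_gem_active_get_unlocked() helper added below
lets such a lookup pin the tracked request without holding struct_mutex:

        static bool example_is_busy(const struct i915_gem_active *active)
        {
                struct drm_i915_gem_request *rq;
                bool busy = false;

                /* Takes a reference under the RCU read lock; the request
                 * cannot be freed or reused until we drop it.
                 */
                rq = i915_gem_active_get_unlocked(active);
                if (rq) {
                        busy = !i915_gem_request_completed(rq);
                        i915_gem_request_put(rq);
                }

                return busy;
        }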

However, by enabling deferred freeing of requests, we can potentially
hog a lot of memory when dealing with tens of thousands of requests per
second - with a quick insertion of a synchronize_rcu() inside our
shrinker callback, that issue disappears.

v2: Currently, it is our responsibility to handle reclaim, i.e. to avoid
hogging memory with the delayed slab frees. At the moment, we wait for a
grace period in the shrinker, and block for all RCU callbacks on oom.
Suggested alternatives focus on flushing our RCU callback when we have a
certain number of outstanding request frees, and blocking on that flush
after a second high watermark. (So rather than wait for the system to
run out of memory, we stop issuing requests - both are nondeterministic.)

Paul E. McKenney wrote:

Another approach is synchronize_rcu() after some largish number of
requests.  The advantage of this approach is that it throttles the
production of callbacks at the source.  The corresponding disadvantage
is that it slows things up.

Another approach is to use call_rcu(), but if the previous call_rcu()
is still in flight, block waiting for it.  Yet another approach is
the get_state_synchronize_rcu() / cond_synchronize_rcu() pair.  The
idea is to do something like this:

        cond_synchronize_rcu(cookie);
        cookie = get_state_synchronize_rcu();

You would of course do an initial get_state_synchronize_rcu() to
get things going.  This would not block unless there was less than
one grace period's worth of time between invocations.  But this
assumes a busy system, where there is almost always a grace period
in flight.  But you can make that happen as follows:

        cond_synchronize_rcu(cookie);
        cookie = get_state_synchronize_rcu();
        call_rcu(&my_rcu_head, noop_function);

Note that you need additional code to make sure that the old callback
has completed before doing a new one.  Setting and clearing a flag
with appropriate memory ordering control suffices (e.g., smp_load_acquire()
and smp_store_release()).
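
An illustrative sketch of that flagged call_rcu() throttle (hypothetical
names, single producer assumed; this patch instead waits for the grace
period in the shrinker) could look like:

        #include <linux/rcupdate.h>

        /* gp_cookie should be seeded with an initial get_state_synchronize_rcu() */
        static unsigned long gp_cookie;
        static int gp_callback_busy;
        static struct rcu_head gp_head;

        static void gp_done(struct rcu_head *head)
        {
                /* pairs with the smp_load_acquire() below */
                smp_store_release(&gp_callback_busy, 0);
        }

        static void throttle_request_frees(void)
        {
                /* blocks only if less than one grace period has elapsed
                 * since the previous batch of frees
                 */
                cond_synchronize_rcu(gp_cookie);
                gp_cookie = get_state_synchronize_rcu();

                /* keep a grace period in flight, reusing the rcu_head only
                 * once the previous callback has completed
                 */
                if (!smp_load_acquire(&gp_callback_busy)) {
                        gp_callback_busy = 1;
                        call_rcu(&gp_head, gp_done);
                }
        }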

v3: Add more comments on the compiler and processor ordering of operations
within the RCU lookup, and note that we can use rcu_access_pointer() here instead.

v4: Wrap i915_gem_active_get_rcu() to take the rcu_read_lock itself.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: "Goel, Akash" <akash.goel@intel.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-25-git-send-email-chris@chris-wilson.co.uk
parent 00e60f26
@@ -4431,7 +4431,9 @@ i915_gem_load_init(struct drm_device *dev)
         dev_priv->requests =
                 kmem_cache_create("i915_gem_request",
                                   sizeof(struct drm_i915_gem_request), 0,
-                                  SLAB_HWCACHE_ALIGN,
+                                  SLAB_HWCACHE_ALIGN |
+                                  SLAB_RECLAIM_ACCOUNT |
+                                  SLAB_DESTROY_BY_RCU,
                                   NULL);
         INIT_LIST_HEAD(&dev_priv->context_list);
@@ -4467,6 +4469,9 @@ void i915_gem_load_cleanup(struct drm_device *dev)
         kmem_cache_destroy(dev_priv->requests);
         kmem_cache_destroy(dev_priv->vmas);
         kmem_cache_destroy(dev_priv->objects);
+
+        /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
+        rcu_barrier();
 }

 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
...
@@ -205,7 +205,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
                 prefetchw(next);

                 INIT_LIST_HEAD(&active->link);
-                active->request = NULL;
+                RCU_INIT_POINTER(active->request, NULL);

                 active->retire(active, request);
         }
...
@@ -183,6 +183,12 @@ i915_gem_request_get(struct drm_i915_gem_request *req)
         return to_request(fence_get(&req->fence));
 }

+static inline struct drm_i915_gem_request *
+i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
+{
+        return to_request(fence_get_rcu(&req->fence));
+}
+
 static inline void
 i915_gem_request_put(struct drm_i915_gem_request *req)
 {
@@ -286,7 +292,7 @@ typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
                                    struct drm_i915_gem_request *);

 struct i915_gem_active {
-        struct drm_i915_gem_request *request;
+        struct drm_i915_gem_request __rcu *request;
         struct list_head link;
         i915_gem_retire_fn retire;
 };
@@ -327,13 +333,19 @@ i915_gem_active_set(struct i915_gem_active *active,
                     struct drm_i915_gem_request *request)
 {
         list_move(&active->link, &request->active_list);
-        active->request = request;
+        rcu_assign_pointer(active->request, request);
 }

 static inline struct drm_i915_gem_request *
 __i915_gem_active_peek(const struct i915_gem_active *active)
 {
-        return active->request;
+        /* Inside the error capture (running with the driver in an unknown
+         * state), we want to bend the rules slightly (a lot).
+         *
+         * Work is in progress to make it safer, in the meantime this keeps
+         * the known issue from spamming the logs.
+         */
+        return rcu_dereference_protected(active->request, 1);
 }

 /**
@@ -349,7 +361,29 @@ i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
 {
         struct drm_i915_gem_request *request;

-        request = active->request;
+        request = rcu_dereference_protected(active->request,
+                                            lockdep_is_held(mutex));
+        if (!request || i915_gem_request_completed(request))
+                return NULL;
+
+        return request;
+}
+
+/**
+ * i915_gem_active_peek_rcu - report the active request being monitored
+ * @active - the active tracker
+ *
+ * i915_gem_active_peek_rcu() returns the current request being tracked if
+ * still active, or NULL. It does not obtain a reference on the request
+ * for the caller, and inspection of the request is only valid under
+ * the RCU lock.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_peek_rcu(const struct i915_gem_active *active)
+{
+        struct drm_i915_gem_request *request;
+
+        request = rcu_dereference(active->request);
         if (!request || i915_gem_request_completed(request))
                 return NULL;
@@ -369,6 +403,119 @@ i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
         return i915_gem_request_get(i915_gem_active_peek(active, mutex));
 }

+/**
+ * __i915_gem_active_get_rcu - return a reference to the active request
+ * @active - the active tracker
+ *
+ * __i915_gem_active_get() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold the RCU read lock, but
+ * the returned pointer is safe to use outside of RCU.
+ */
+static inline struct drm_i915_gem_request *
+__i915_gem_active_get_rcu(const struct i915_gem_active *active)
+{
+        /* Performing a lockless retrieval of the active request is super
+         * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
+         * slab of request objects will not be freed whilst we hold the
+         * RCU read lock. It does not guarantee that the request itself
+         * will not be freed and then *reused*. Viz,
+         *
+         * Thread A                     Thread B
+         *
+         * req = active.request
+         *                              retire(req) -> free(req);
+         *                              (req is now first on the slab freelist)
+         *                              active.request = NULL
+         *
+         *                              req = new submission on a new object
+         * ref(req)
+         *
+         * To prevent the request from being reused whilst the caller
+         * uses it, we take a reference like normal. Whilst acquiring
+         * the reference we check that it is not in a destroyed state
+         * (refcnt == 0). That prevents the request being reallocated
+         * whilst the caller holds on to it. To check that the request
+         * was not reallocated as we acquired the reference we have to
+         * check that our request remains the active request across
+         * the lookup, in the same manner as a seqlock. The visibility
+         * of the pointer versus the reference counting is controlled
+         * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
+         *
+         * In the middle of all that, we inspect whether the request is
+         * complete. Retiring is lazy so the request may be completed long
+         * before the active tracker is updated. Querying whether the
+         * request is complete is far cheaper (as it involves no locked
+         * instructions setting cachelines to exclusive) than acquiring
+         * the reference, so we do it first. The RCU read lock ensures the
+         * pointer dereference is valid, but does not ensure that the
+         * seqno nor HWS is the right one! However, if the request was
+         * reallocated, that means the active tracker's request was complete.
+         * If the new request is also complete, then both are and we can
+         * just report the active tracker is idle. If the new request is
+         * incomplete, then we acquire a reference on it and check that
+         * it remained the active request.
+         */
+        do {
+                struct drm_i915_gem_request *request;
+
+                request = rcu_dereference(active->request);
+                if (!request || i915_gem_request_completed(request))
+                        return NULL;
+
+                request = i915_gem_request_get_rcu(request);
+
+                /* What stops the following rcu_access_pointer() from occurring
+                 * before the above i915_gem_request_get_rcu()? If we were
+                 * to read the value before pausing to get the reference to
+                 * the request, we may not notice a change in the active
+                 * tracker.
+                 *
+                 * The rcu_access_pointer() is a mere compiler barrier, which
+                 * means both the CPU and compiler are free to perform the
+                 * memory read without constraint. The compiler only has to
+                 * ensure that any operations after the rcu_access_pointer()
+                 * occur afterwards in program order. This means the read may
+                 * be performed earlier by an out-of-order CPU, or adventurous
+                 * compiler.
+                 *
+                 * The atomic operation at the heart of
+                 * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+                 * atomic_inc_not_zero() which is only a full memory barrier
+                 * when successful. That is, if i915_gem_request_get_rcu()
+                 * returns the request (and so with the reference counted
+                 * incremented) then the following read for rcu_access_pointer()
+                 * must occur after the atomic operation and so confirm
+                 * that this request is the one currently being tracked.
+                 */
+                if (!request || request == rcu_access_pointer(active->request))
+                        return rcu_pointer_handoff(request);
+
+                i915_gem_request_put(request);
+        } while (1);
+}
+
+/**
+ * i915_gem_active_get_unlocked - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_gem_active_get_unlocked() returns a reference to the active request,
+ * or NULL if the active tracker is idle. The reference is obtained under RCU,
+ * so no locking is required by the caller.
+ *
+ * The reference should be freed with i915_gem_request_put().
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_get_unlocked(const struct i915_gem_active *active)
+{
+        struct drm_i915_gem_request *request;
+
+        rcu_read_lock();
+        request = __i915_gem_active_get_rcu(active);
+        rcu_read_unlock();
+
+        return request;
+}
+
 /**
  * i915_gem_active_isset - report whether the active tracker is assigned
  * @active - the active tracker
@@ -380,7 +527,7 @@ i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
 static inline bool
 i915_gem_active_isset(const struct i915_gem_active *active)
 {
-        return active->request;
+        return rcu_access_pointer(active->request);
 }

 /**
@@ -437,7 +584,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
         struct drm_i915_gem_request *request;
         int ret;

-        request = active->request;
+        request = rcu_dereference_protected(active->request,
+                                            lockdep_is_held(mutex));
         if (!request)
                 return 0;

@@ -446,7 +594,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
                 return ret;

         list_del_init(&active->link);
-        active->request = NULL;
+        RCU_INIT_POINTER(active->request, NULL);

         active->retire(active, request);
         return 0;
...
@@ -205,6 +205,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                 intel_runtime_pm_put(dev_priv);

         i915_gem_retire_requests(dev_priv);
+        /* expedite the RCU grace period to free some request slabs */
+        synchronize_rcu_expedited();

         return count;
 }
@@ -225,10 +227,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
  */
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
-        return i915_gem_shrink(dev_priv, -1UL,
-                               I915_SHRINK_BOUND |
-                               I915_SHRINK_UNBOUND |
-                               I915_SHRINK_ACTIVE);
+        unsigned long freed;
+
+        freed = i915_gem_shrink(dev_priv, -1UL,
+                                I915_SHRINK_BOUND |
+                                I915_SHRINK_UNBOUND |
+                                I915_SHRINK_ACTIVE);
+        rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+
+        return freed;
 }

 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
...