Commit 47514ac7 authored by Daniel Vetter

drm/i915: move request slabs to direct module init/exit

With the global kmem_cache shrink infrastructure gone there's nothing
special about these slabs anymore, and we can convert them over to
plain module init/exit functions.

I'm splitting this conversion into one patch per slab owner because
renaming the static global.slab_requests|execute_cbs members to just
slab_requests|execute_cbs generates quite a bit of noise.

v2: Make slab static (Jason, 0day)
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210727121037.2041102-7-daniel.vetter@ffwll.ch
parent c8ad09af
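
For context before the diff: the end state this series converges on is a slab that is created in its subsystem's module-init hook and destroyed in its module-exit hook, with no registration in a global table. A minimal, hypothetical sketch of that pattern (the "foo" names are illustrative placeholders, not i915 symbols):

/* Hypothetical sketch of "direct module init/exit" for a slab cache;
 * only the overall shape matches the i915 change below, the names do not.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo_request {
	int placeholder;
};

static struct kmem_cache *slab_foo_requests;

int __init foo_module_init(void)
{
	/* Create the slab directly at module init. */
	slab_foo_requests = KMEM_CACHE(foo_request,
				       SLAB_HWCACHE_ALIGN |
				       SLAB_RECLAIM_ACCOUNT);
	if (!slab_foo_requests)
		return -ENOMEM;

	return 0;
}

void foo_module_exit(void)
{
	/* Destroy the slab directly at module exit; there is no global
	 * registration or shrink hook to go through anymore.
	 */
	kmem_cache_destroy(slab_foo_requests);
}

The diff below applies exactly this shape to the i915_request and execute_cb slabs and drops the i915_global_request wrapper that only existed for the old global infrastructure.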
@@ -8,7 +8,6 @@
 #include <linux/workqueue.h>

 #include "i915_globals.h"
-#include "i915_request.h"
 #include "i915_scheduler.h"
 #include "i915_vma.h"
@@ -30,7 +29,6 @@ static void __i915_globals_cleanup(void)
 }

 static __initconst int (* const initfn[])(void) = {
-	i915_global_request_init,
 	i915_global_scheduler_init,
 	i915_global_vma_init,
 };
@@ -35,6 +35,7 @@
 #include "i915_drv.h"
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_object.h"
+#include "i915_request.h"
 #include "i915_perf.h"
 #include "i915_globals.h"
 #include "i915_selftest.h"
@@ -1268,6 +1269,7 @@ static const struct {
 	{ i915_context_module_init, i915_context_module_exit },
 	{ i915_gem_context_module_init, i915_gem_context_module_exit },
 	{ i915_objects_module_init, i915_objects_module_exit },
+	{ i915_request_module_init, i915_request_module_exit },
 	{ i915_globals_init, i915_globals_exit },
 	{ i915_mock_selftests, NULL },
 	{ i915_pmu_init, i915_pmu_exit },
@@ -42,7 +42,6 @@
 #include "i915_active.h"
 #include "i915_drv.h"
-#include "i915_globals.h"
 #include "i915_trace.h"
 #include "intel_pm.h"
@@ -52,11 +51,8 @@ struct execute_cb {
 	struct i915_request *signal;
 };

-static struct i915_global_request {
-	struct i915_global base;
-	struct kmem_cache *slab_requests;
-	struct kmem_cache *slab_execute_cbs;
-} global;
+static struct kmem_cache *slab_requests;
+static struct kmem_cache *slab_execute_cbs;

 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
@@ -107,7 +103,7 @@ static signed long i915_fence_wait(struct dma_fence *fence,

 struct kmem_cache *i915_request_slab_cache(void)
 {
-	return global.slab_requests;
+	return slab_requests;
 }

 static void i915_fence_release(struct dma_fence *fence)
@@ -140,7 +136,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	intel_context_put(rq->context);

-	kmem_cache_free(global.slab_requests, rq);
+	kmem_cache_free(slab_requests, rq);
 }

 const struct dma_fence_ops i915_fence_ops = {
@@ -157,7 +153,7 @@ static void irq_execute_cb(struct irq_work *wrk)
 	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

 	i915_sw_fence_complete(cb->fence);
-	kmem_cache_free(global.slab_execute_cbs, cb);
+	kmem_cache_free(slab_execute_cbs, cb);
 }

 static __always_inline void
@@ -462,7 +458,7 @@ __await_execution(struct i915_request *rq,
 	if (i915_request_is_active(signal))
 		return 0;

-	cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
+	cb = kmem_cache_alloc(slab_execute_cbs, gfp);
 	if (!cb)
 		return -ENOMEM;
@@ -808,7 +804,7 @@ request_alloc_slow(struct intel_timeline *tl,
 	rq = list_first_entry(&tl->requests, typeof(*rq), link);
 	i915_request_retire(rq);

-	rq = kmem_cache_alloc(global.slab_requests,
+	rq = kmem_cache_alloc(slab_requests,
 			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (rq)
 		return rq;
@@ -821,7 +817,7 @@ request_alloc_slow(struct intel_timeline *tl,
 	retire_requests(tl);

 out:
-	return kmem_cache_alloc(global.slab_requests, gfp);
+	return kmem_cache_alloc(slab_requests, gfp);
 }

 static void __i915_request_ctor(void *arg)
@@ -882,7 +878,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
-	rq = kmem_cache_alloc(global.slab_requests,
+	rq = kmem_cache_alloc(slab_requests,
 			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
 		rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
@@ -984,7 +980,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 err_free:
 	intel_context_put(ce);
-	kmem_cache_free(global.slab_requests, rq);
+	kmem_cache_free(slab_requests, rq);
 err_unreserve:
 	intel_context_unpin(ce);
 	return ERR_PTR(ret);
@@ -2080,19 +2076,15 @@ enum i915_request_state i915_test_request_state(struct i915_request *rq)
 #include "selftests/i915_request.c"
 #endif

-static void i915_global_request_exit(void)
+void i915_request_module_exit(void)
 {
-	kmem_cache_destroy(global.slab_execute_cbs);
-	kmem_cache_destroy(global.slab_requests);
+	kmem_cache_destroy(slab_execute_cbs);
+	kmem_cache_destroy(slab_requests);
 }

-static struct i915_global_request global = { {
-	.exit = i915_global_request_exit,
-} };
-
-int __init i915_global_request_init(void)
+int __init i915_request_module_init(void)
 {
-	global.slab_requests =
+	slab_requests =
 		kmem_cache_create("i915_request",
 				  sizeof(struct i915_request),
 				  __alignof__(struct i915_request),
@@ -2100,20 +2092,19 @@ int __init i915_global_request_init(void)
 				  SLAB_RECLAIM_ACCOUNT |
 				  SLAB_TYPESAFE_BY_RCU,
 				  __i915_request_ctor);
-	if (!global.slab_requests)
+	if (!slab_requests)
 		return -ENOMEM;

-	global.slab_execute_cbs = KMEM_CACHE(execute_cb,
+	slab_execute_cbs = KMEM_CACHE(execute_cb,
 					     SLAB_HWCACHE_ALIGN |
 					     SLAB_RECLAIM_ACCOUNT |
 					     SLAB_TYPESAFE_BY_RCU);
-	if (!global.slab_execute_cbs)
+	if (!slab_execute_cbs)
 		goto err_requests;

-	i915_global_register(&global.base);
 	return 0;

 err_requests:
-	kmem_cache_destroy(global.slab_requests);
+	kmem_cache_destroy(slab_requests);
 	return -ENOMEM;
 }
@@ -668,4 +668,7 @@ enum i915_request_state {
 enum i915_request_state i915_test_request_state(struct i915_request *rq);

+void i915_request_module_exit(void);
+int i915_request_module_init(void);
+
 #endif /* I915_REQUEST_H */
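
The { init, exit } pairs added in the @@ -1268 hunk above are walked by the driver's top-level module init. As a rough illustration, here is a simplified sketch of how such a table-driven init with error unwind can look; the loop, its unwind behaviour and the example_driver_* names are assumptions for illustration, not the exact i915 code:

/* Simplified, assumed sketch of a table-driven module init/exit; only the
 * i915_request_module_init()/exit() entry mirrors the diff above.
 */
#include <linux/init.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/module.h>

#include "i915_request.h"	/* i915_request_module_init()/exit() */

static const struct {
	int (*init)(void);
	void (*exit)(void);
} init_funcs[] = {
	{ i915_request_module_init, i915_request_module_exit },
	/* ... further { init, exit } pairs, in dependency order ... */
};

static int __init example_driver_init(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
		if (!init_funcs[i].init)
			continue;

		err = init_funcs[i].init();
		if (err) {
			/* Roll back only the entries that already ran. */
			while (i--)
				if (init_funcs[i].exit)
					init_funcs[i].exit();
			return err;
		}
	}

	return 0;
}

static void __exit example_driver_exit(void)
{
	int i;

	/* Tear down in reverse order of initialization. */
	for (i = ARRAY_SIZE(init_funcs) - 1; i >= 0; i--)
		if (init_funcs[i].exit)
			init_funcs[i].exit();
}

module_init(example_driver_init);
module_exit(example_driver_exit);
MODULE_LICENSE("GPL");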