Commit 47514ac7 authored by Daniel Vetter

drm/i915: move request slabs to direct module init/exit

With the global kmem_cache shrink infrastructure gone there's nothing
special and we can convert them over.

I'm doing this split up into each patch because there's quite a bit of
noise with removing the static global.slab_requests|execute_cbs to just a
slab_requests|execute_cbs.

v2: Make slab static (Jason, 0day)
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210727121037.2041102-7-daniel.vetter@ffwll.ch
parent c8ad09af
...@@ -8,7 +8,6 @@ ...@@ -8,7 +8,6 @@
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include "i915_globals.h" #include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h" #include "i915_scheduler.h"
#include "i915_vma.h" #include "i915_vma.h"
...@@ -30,7 +29,6 @@ static void __i915_globals_cleanup(void) ...@@ -30,7 +29,6 @@ static void __i915_globals_cleanup(void)
} }
static __initconst int (* const initfn[])(void) = { static __initconst int (* const initfn[])(void) = {
i915_global_request_init,
i915_global_scheduler_init, i915_global_scheduler_init,
i915_global_vma_init, i915_global_vma_init,
}; };
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include "i915_drv.h" #include "i915_drv.h"
#include "gem/i915_gem_context.h" #include "gem/i915_gem_context.h"
#include "gem/i915_gem_object.h" #include "gem/i915_gem_object.h"
#include "i915_request.h"
#include "i915_perf.h" #include "i915_perf.h"
#include "i915_globals.h" #include "i915_globals.h"
#include "i915_selftest.h" #include "i915_selftest.h"
...@@ -1268,6 +1269,7 @@ static const struct { ...@@ -1268,6 +1269,7 @@ static const struct {
{ i915_context_module_init, i915_context_module_exit }, { i915_context_module_init, i915_context_module_exit },
{ i915_gem_context_module_init, i915_gem_context_module_exit }, { i915_gem_context_module_init, i915_gem_context_module_exit },
{ i915_objects_module_init, i915_objects_module_exit }, { i915_objects_module_init, i915_objects_module_exit },
{ i915_request_module_init, i915_request_module_exit },
{ i915_globals_init, i915_globals_exit }, { i915_globals_init, i915_globals_exit },
{ i915_mock_selftests, NULL }, { i915_mock_selftests, NULL },
{ i915_pmu_init, i915_pmu_exit }, { i915_pmu_init, i915_pmu_exit },
......
...@@ -42,7 +42,6 @@ ...@@ -42,7 +42,6 @@
#include "i915_active.h" #include "i915_active.h"
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_globals.h"
#include "i915_trace.h" #include "i915_trace.h"
#include "intel_pm.h" #include "intel_pm.h"
...@@ -52,11 +51,8 @@ struct execute_cb { ...@@ -52,11 +51,8 @@ struct execute_cb {
struct i915_request *signal; struct i915_request *signal;
}; };
static struct i915_global_request { static struct kmem_cache *slab_requests;
struct i915_global base; static struct kmem_cache *slab_execute_cbs;
struct kmem_cache *slab_requests;
struct kmem_cache *slab_execute_cbs;
} global;
static const char *i915_fence_get_driver_name(struct dma_fence *fence) static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{ {
...@@ -107,7 +103,7 @@ static signed long i915_fence_wait(struct dma_fence *fence, ...@@ -107,7 +103,7 @@ static signed long i915_fence_wait(struct dma_fence *fence,
struct kmem_cache *i915_request_slab_cache(void) struct kmem_cache *i915_request_slab_cache(void)
{ {
return global.slab_requests; return slab_requests;
} }
static void i915_fence_release(struct dma_fence *fence) static void i915_fence_release(struct dma_fence *fence)
...@@ -140,7 +136,7 @@ static void i915_fence_release(struct dma_fence *fence) ...@@ -140,7 +136,7 @@ static void i915_fence_release(struct dma_fence *fence)
intel_context_put(rq->context); intel_context_put(rq->context);
kmem_cache_free(global.slab_requests, rq); kmem_cache_free(slab_requests, rq);
} }
const struct dma_fence_ops i915_fence_ops = { const struct dma_fence_ops i915_fence_ops = {
...@@ -157,7 +153,7 @@ static void irq_execute_cb(struct irq_work *wrk) ...@@ -157,7 +153,7 @@ static void irq_execute_cb(struct irq_work *wrk)
struct execute_cb *cb = container_of(wrk, typeof(*cb), work); struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
i915_sw_fence_complete(cb->fence); i915_sw_fence_complete(cb->fence);
kmem_cache_free(global.slab_execute_cbs, cb); kmem_cache_free(slab_execute_cbs, cb);
} }
static __always_inline void static __always_inline void
...@@ -462,7 +458,7 @@ __await_execution(struct i915_request *rq, ...@@ -462,7 +458,7 @@ __await_execution(struct i915_request *rq,
if (i915_request_is_active(signal)) if (i915_request_is_active(signal))
return 0; return 0;
cb = kmem_cache_alloc(global.slab_execute_cbs, gfp); cb = kmem_cache_alloc(slab_execute_cbs, gfp);
if (!cb) if (!cb)
return -ENOMEM; return -ENOMEM;
...@@ -808,7 +804,7 @@ request_alloc_slow(struct intel_timeline *tl, ...@@ -808,7 +804,7 @@ request_alloc_slow(struct intel_timeline *tl,
rq = list_first_entry(&tl->requests, typeof(*rq), link); rq = list_first_entry(&tl->requests, typeof(*rq), link);
i915_request_retire(rq); i915_request_retire(rq);
rq = kmem_cache_alloc(global.slab_requests, rq = kmem_cache_alloc(slab_requests,
gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (rq) if (rq)
return rq; return rq;
...@@ -821,7 +817,7 @@ request_alloc_slow(struct intel_timeline *tl, ...@@ -821,7 +817,7 @@ request_alloc_slow(struct intel_timeline *tl,
retire_requests(tl); retire_requests(tl);
out: out:
return kmem_cache_alloc(global.slab_requests, gfp); return kmem_cache_alloc(slab_requests, gfp);
} }
static void __i915_request_ctor(void *arg) static void __i915_request_ctor(void *arg)
...@@ -882,7 +878,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) ...@@ -882,7 +878,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
* *
* Do not use kmem_cache_zalloc() here! * Do not use kmem_cache_zalloc() here!
*/ */
rq = kmem_cache_alloc(global.slab_requests, rq = kmem_cache_alloc(slab_requests,
gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) { if (unlikely(!rq)) {
rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp); rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
...@@ -984,7 +980,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) ...@@ -984,7 +980,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
err_free: err_free:
intel_context_put(ce); intel_context_put(ce);
kmem_cache_free(global.slab_requests, rq); kmem_cache_free(slab_requests, rq);
err_unreserve: err_unreserve:
intel_context_unpin(ce); intel_context_unpin(ce);
return ERR_PTR(ret); return ERR_PTR(ret);
...@@ -2080,19 +2076,15 @@ enum i915_request_state i915_test_request_state(struct i915_request *rq) ...@@ -2080,19 +2076,15 @@ enum i915_request_state i915_test_request_state(struct i915_request *rq)
#include "selftests/i915_request.c" #include "selftests/i915_request.c"
#endif #endif
static void i915_global_request_exit(void) void i915_request_module_exit(void)
{ {
kmem_cache_destroy(global.slab_execute_cbs); kmem_cache_destroy(slab_execute_cbs);
kmem_cache_destroy(global.slab_requests); kmem_cache_destroy(slab_requests);
} }
static struct i915_global_request global = { { int __init i915_request_module_init(void)
.exit = i915_global_request_exit,
} };
int __init i915_global_request_init(void)
{ {
global.slab_requests = slab_requests =
kmem_cache_create("i915_request", kmem_cache_create("i915_request",
sizeof(struct i915_request), sizeof(struct i915_request),
__alignof__(struct i915_request), __alignof__(struct i915_request),
...@@ -2100,20 +2092,19 @@ int __init i915_global_request_init(void) ...@@ -2100,20 +2092,19 @@ int __init i915_global_request_init(void)
SLAB_RECLAIM_ACCOUNT | SLAB_RECLAIM_ACCOUNT |
SLAB_TYPESAFE_BY_RCU, SLAB_TYPESAFE_BY_RCU,
__i915_request_ctor); __i915_request_ctor);
if (!global.slab_requests) if (!slab_requests)
return -ENOMEM; return -ENOMEM;
global.slab_execute_cbs = KMEM_CACHE(execute_cb, slab_execute_cbs = KMEM_CACHE(execute_cb,
SLAB_HWCACHE_ALIGN | SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT | SLAB_RECLAIM_ACCOUNT |
SLAB_TYPESAFE_BY_RCU); SLAB_TYPESAFE_BY_RCU);
if (!global.slab_execute_cbs) if (!slab_execute_cbs)
goto err_requests; goto err_requests;
i915_global_register(&global.base);
return 0; return 0;
err_requests: err_requests:
kmem_cache_destroy(global.slab_requests); kmem_cache_destroy(slab_requests);
return -ENOMEM; return -ENOMEM;
} }
...@@ -668,4 +668,7 @@ enum i915_request_state { ...@@ -668,4 +668,7 @@ enum i915_request_state {
enum i915_request_state i915_test_request_state(struct i915_request *rq); enum i915_request_state i915_test_request_state(struct i915_request *rq);
void i915_request_module_exit(void);
int i915_request_module_init(void);
#endif /* I915_REQUEST_H */ #endif /* I915_REQUEST_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment