Commit efab6d8d authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Use a separate slab for requests

Requests are allocated even more frequently than objects and equally
benefit from having a dedicated slab.

v2: Rebase
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent f1e2daea
...@@ -1006,8 +1006,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -1006,8 +1006,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
put_bridge: put_bridge:
pci_dev_put(dev_priv->bridge_dev); pci_dev_put(dev_priv->bridge_dev);
free_priv: free_priv:
if (dev_priv->slab) if (dev_priv->requests)
kmem_cache_destroy(dev_priv->slab); kmem_cache_destroy(dev_priv->requests);
if (dev_priv->objects)
kmem_cache_destroy(dev_priv->objects);
kfree(dev_priv); kfree(dev_priv);
return ret; return ret;
} }
...@@ -1090,8 +1092,10 @@ int i915_driver_unload(struct drm_device *dev) ...@@ -1090,8 +1092,10 @@ int i915_driver_unload(struct drm_device *dev)
if (dev_priv->regs != NULL) if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs); pci_iounmap(dev->pdev, dev_priv->regs);
if (dev_priv->slab) if (dev_priv->requests)
kmem_cache_destroy(dev_priv->slab); kmem_cache_destroy(dev_priv->requests);
if (dev_priv->objects)
kmem_cache_destroy(dev_priv->objects);
pci_dev_put(dev_priv->bridge_dev); pci_dev_put(dev_priv->bridge_dev);
kfree(dev_priv); kfree(dev_priv);
......
...@@ -1558,7 +1558,8 @@ struct i915_virtual_gpu { ...@@ -1558,7 +1558,8 @@ struct i915_virtual_gpu {
struct drm_i915_private { struct drm_i915_private {
struct drm_device *dev; struct drm_device *dev;
struct kmem_cache *slab; struct kmem_cache *objects;
struct kmem_cache *requests;
const struct intel_device_info info; const struct intel_device_info info;
...@@ -2044,6 +2045,7 @@ struct drm_i915_gem_request { ...@@ -2044,6 +2045,7 @@ struct drm_i915_gem_request {
struct kref ref; struct kref ref;
/** On Which ring this request was generated */ /** On Which ring this request was generated */
struct drm_i915_private *i915;
struct intel_engine_cs *ring; struct intel_engine_cs *ring;
/** GEM sequence number associated with this request. */ /** GEM sequence number associated with this request. */
......
...@@ -378,13 +378,13 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, ...@@ -378,13 +378,13 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
void *i915_gem_object_alloc(struct drm_device *dev) void *i915_gem_object_alloc(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL); return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
} }
void i915_gem_object_free(struct drm_i915_gem_object *obj) void i915_gem_object_free(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
kmem_cache_free(dev_priv->slab, obj); kmem_cache_free(dev_priv->objects, obj);
} }
static int static int
...@@ -2506,43 +2506,45 @@ void i915_gem_request_free(struct kref *req_ref) ...@@ -2506,43 +2506,45 @@ void i915_gem_request_free(struct kref *req_ref)
i915_gem_context_unreference(ctx); i915_gem_context_unreference(ctx);
} }
kfree(req); kmem_cache_free(req->i915->requests, req);
} }
int i915_gem_request_alloc(struct intel_engine_cs *ring, int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx) struct intel_context *ctx)
{ {
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct drm_i915_gem_request *rq;
int ret; int ret;
struct drm_i915_gem_request *request;
struct drm_i915_private *dev_private = ring->dev->dev_private;
if (ring->outstanding_lazy_request) if (ring->outstanding_lazy_request)
return 0; return 0;
request = kzalloc(sizeof(*request), GFP_KERNEL); rq = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
if (request == NULL) if (rq == NULL)
return -ENOMEM; return -ENOMEM;
ret = i915_gem_get_seqno(ring->dev, &request->seqno); kref_init(&rq->ref);
rq->i915 = dev_priv;
ret = i915_gem_get_seqno(ring->dev, &rq->seqno);
if (ret) { if (ret) {
kfree(request); kfree(rq);
return ret; return ret;
} }
kref_init(&request->ref); rq->ring = ring;
request->ring = ring; rq->uniq = dev_priv->request_uniq++;
request->uniq = dev_private->request_uniq++;
if (i915.enable_execlists) if (i915.enable_execlists)
ret = intel_logical_ring_alloc_request_extras(request, ctx); ret = intel_logical_ring_alloc_request_extras(rq, ctx);
else else
ret = intel_ring_alloc_request_extras(request); ret = intel_ring_alloc_request_extras(rq);
if (ret) { if (ret) {
kfree(request); kfree(rq);
return ret; return ret;
} }
ring->outstanding_lazy_request = request; ring->outstanding_lazy_request = rq;
return 0; return 0;
} }
...@@ -4984,11 +4986,16 @@ i915_gem_load(struct drm_device *dev) ...@@ -4984,11 +4986,16 @@ i915_gem_load(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int i; int i;
dev_priv->slab = dev_priv->objects =
kmem_cache_create("i915_gem_object", kmem_cache_create("i915_gem_object",
sizeof(struct drm_i915_gem_object), 0, sizeof(struct drm_i915_gem_object), 0,
SLAB_HWCACHE_ALIGN, SLAB_HWCACHE_ALIGN,
NULL); NULL);
dev_priv->requests =
kmem_cache_create("i915_gem_request",
sizeof(struct drm_i915_gem_request), 0,
SLAB_HWCACHE_ALIGN,
NULL);
INIT_LIST_HEAD(&dev_priv->vm_list); INIT_LIST_HEAD(&dev_priv->vm_list);
i915_init_vm(dev_priv, &dev_priv->gtt.base); i915_init_vm(dev_priv, &dev_priv->gtt.base);
......
...@@ -2139,7 +2139,6 @@ int intel_ring_idle(struct intel_engine_cs *ring) ...@@ -2139,7 +2139,6 @@ int intel_ring_idle(struct intel_engine_cs *ring)
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{ {
request->ringbuf = request->ring->buffer; request->ringbuf = request->ring->buffer;
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment