Commit 8ee14975 authored by Oscar Mateo, committed by Daniel Vetter

drm/i915: Split the ringbuffers from the rings (1/3)

As anticipated by the previous patch, the ringbuffers and the engine
command streamers belong in different structs. This is because, while
they used to be tightly coupled, the new Logical Ring Contexts (LRC
for short) will have a ringbuffer each.

In legacy code, we will use the buffer pointer inside each ring to get
to the pertaining ringbuffer (the actual switch will be done in the
next patch). In the new Execlists code, this pointer will be NULL and
we will instead use the one inside the context.
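
To make the intended lookup concrete, here is a minimal, standalone C sketch
(simplified stand-in structs and a hypothetical get_ringbuffer() helper, not
the driver's real API): legacy code reaches the ring through the engine's
buffer pointer, while an Execlists-style context carries its own ringbuffer
and the engine's pointer stays NULL.

/*
 * Standalone illustration only -- simplified stand-ins for the driver
 * structs, not the actual i915 definitions.
 */
#include <stdio.h>

struct ringbuffer_sketch {
        unsigned int head;
        unsigned int tail;
        int size;
};

struct engine_sketch {
        const char *name;
        struct ringbuffer_sketch *buffer;       /* NULL in the Execlists case */
};

struct context_sketch {
        struct ringbuffer_sketch ringbuf;       /* per-context ring (LRC) */
};

/* Hypothetical helper: prefer the engine's ring, fall back to the context's. */
static struct ringbuffer_sketch *
get_ringbuffer(struct engine_sketch *engine, struct context_sketch *ctx)
{
        return engine->buffer ? engine->buffer : &ctx->ringbuf;
}

int main(void)
{
        struct ringbuffer_sketch legacy_ring = { .size = 4096 };
        struct engine_sketch rcs_legacy = { "render ring (legacy)", &legacy_ring };
        struct engine_sketch rcs_lrc = { "render ring (execlists)", NULL };
        struct context_sketch ctx = { .ringbuf = { .size = 4096 } };

        printf("%s -> ring of size %d\n", rcs_legacy.name,
               get_ringbuffer(&rcs_legacy, &ctx)->size);
        printf("%s -> ring of size %d\n", rcs_lrc.name,
               get_ringbuffer(&rcs_lrc, &ctx)->size);
        return 0;
}

Under these assumptions the second engine resolves to the context's ring,
which is the direction the follow-up patches take the driver.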
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent a4872ba6
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1422,8 +1422,16 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
 static int intel_init_ring_buffer(struct drm_device *dev,
                                   struct intel_engine_cs *ring)
 {
+        struct intel_ringbuffer *ringbuf = ring->buffer;
         int ret;
 
+        if (ringbuf == NULL) {
+                ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+                if (!ringbuf)
+                        return -ENOMEM;
+                ring->buffer = ringbuf;
+        }
+
         ring->dev = dev;
         INIT_LIST_HEAD(&ring->active_list);
         INIT_LIST_HEAD(&ring->request_list);
@@ -1435,18 +1443,18 @@ static int intel_init_ring_buffer(struct drm_device *dev,
         if (I915_NEED_GFX_HWS(dev)) {
                 ret = init_status_page(ring);
                 if (ret)
-                        return ret;
+                        goto error;
         } else {
                 BUG_ON(ring->id != RCS);
                 ret = init_phys_status_page(ring);
                 if (ret)
-                        return ret;
+                        goto error;
         }
 
         ret = allocate_ring_buffer(ring);
         if (ret) {
                 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
-                return ret;
+                goto error;
         }
 
         /* Workaround an erratum on the i830 which causes a hang if
@@ -1459,9 +1467,18 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
         ret = i915_cmd_parser_init_ring(ring);
         if (ret)
-                return ret;
+                goto error;
+
+        ret = ring->init(ring);
+        if (ret)
+                goto error;
+
+        return 0;
 
-        return ring->init(ring);
+error:
+        kfree(ringbuf);
+        ring->buffer = NULL;
+        return ret;
 }
 
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
@@ -1488,6 +1505,9 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
         cleanup_status_page(ring);
 
         i915_cmd_parser_fini_ring(ring);
+
+        kfree(ring->buffer);
+        ring->buffer = NULL;
 }
 
 static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
@@ -2022,15 +2042,24 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+        struct intel_ringbuffer *ringbuf = ring->buffer;
         int ret;
 
+        if (ringbuf == NULL) {
+                ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+                if (!ringbuf)
+                        return -ENOMEM;
+                ring->buffer = ringbuf;
+        }
+
         ring->name = "render ring";
         ring->id = RCS;
         ring->mmio_base = RENDER_RING_BASE;
 
         if (INTEL_INFO(dev)->gen >= 6) {
                 /* non-kms not supported on gen6+ */
-                return -ENODEV;
+                ret = -ENODEV;
+                goto err_ringbuf;
         }
 
         /* Note: gem is not supported on gen5/ilk without kms (the corresponding
@@ -2074,16 +2103,24 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
         if (ring->virtual_start == NULL) {
                 DRM_ERROR("can not ioremap virtual address for"
                           " ring buffer\n");
-                return -ENOMEM;
+                ret = -ENOMEM;
+                goto err_ringbuf;
         }
 
         if (!I915_NEED_GFX_HWS(dev)) {
                 ret = init_phys_status_page(ring);
                 if (ret)
-                        return ret;
+                        goto err_vstart;
         }
 
         return 0;
+
+err_vstart:
+        iounmap(ring->virtual_start);
+err_ringbuf:
+        kfree(ringbuf);
+        ring->buffer = NULL;
+        return ret;
 }
 
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -58,6 +58,27 @@ struct intel_ring_hangcheck {
         bool deadlock;
 };
 
+struct intel_ringbuffer {
+        struct drm_i915_gem_object *obj;
+        void __iomem *virtual_start;
+
+        u32 head;
+        u32 tail;
+        int space;
+        int size;
+        int effective_size;
+
+        /** We track the position of the requests in the ring buffer, and
+         * when each is retired we increment last_retired_head as the GPU
+         * must have finished processing the request and so we know we
+         * can advance the ringbuffer up to that position.
+         *
+         * last_retired_head is set to -1 after the value is consumed so
+         * we can detect new retirements.
+         */
+        u32 last_retired_head;
+};
+
 struct intel_engine_cs {
         const char *name;
         enum intel_ring_id {
@@ -73,6 +94,7 @@ struct intel_engine_cs {
         void __iomem *virtual_start;
         struct drm_device *dev;
         struct drm_i915_gem_object *obj;
+        struct intel_ringbuffer *buffer;
 
         u32 head;
         u32 tail;
@@ -217,7 +239,7 @@ struct intel_engine_cs {
 static inline bool
 intel_ring_initialized(struct intel_engine_cs *ring)
 {
-        return ring->obj != NULL;
+        return ring->buffer && ring->obj;
 }
 
 static inline unsigned