Commit e3670319 authored by Daniel Vetter

drm/i915: abstract away ring-specific irq_get/put

Inspired by Ben Widawsky's patch for gen6+. Now, after restructuring
how we set up the ring vtables and parameters, we can do this right.

This kills the bsd-specific get/put_irq functions; they're now the same
as the render ring's.
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 686cb5f9
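The shape of the refactor, in miniature: instead of one irq_get/irq_put pair per ring with its interrupt mask hardcoded, each ring records its mask in ring->irq_enable_mask at init time, and a single refcounted get/put pair reads it. Below is a minimal, self-contained C sketch of that pattern; the struct, the hw_* helpers, and the mask values are illustrative stand-ins, and the real functions' locking and gen >= 5 dispatch are omitted.

#include <stdbool.h>
#include <stdio.h>

/* stand-in for struct intel_ring_buffer: only what the sketch needs */
struct ring {
	unsigned int irq_enable_mask;	/* per-ring mask, set once at init */
	int irq_refcount;		/* how many users want the irq on */
};

/* stand-ins for the hardware enable/disable helpers */
static void hw_enable_irq(unsigned int mask)  { printf("enable  0x%x\n", mask); }
static void hw_disable_irq(unsigned int mask) { printf("disable 0x%x\n", mask); }

/* one generic pair replaces the render_ring_* and bsd_ring_* variants */
static bool ring_get_irq(struct ring *ring)
{
	if (ring->irq_refcount++ == 0)		/* first user turns it on */
		hw_enable_irq(ring->irq_enable_mask);
	return true;
}

static void ring_put_irq(struct ring *ring)
{
	if (--ring->irq_refcount == 0)		/* last user turns it off */
		hw_disable_irq(ring->irq_enable_mask);
}

int main(void)
{
	/* rings now differ only in the mask they were initialized with */
	struct ring render = { 0x01, 0 };
	struct ring bsd    = { 0x02, 0 };

	ring_get_irq(&render);	/* enables 0x01 */
	ring_get_irq(&render);	/* refcounted: no hardware access */
	ring_put_irq(&render);
	ring_put_irq(&render);	/* disables 0x01 */
	ring_get_irq(&bsd);	/* same code path, different mask */
	ring_put_irq(&bsd);
	return 0;
}

The variation between rings moves from code into data, which is why the diff below can delete the bsd copies outright and just set a mask per ring.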
@@ -645,7 +645,7 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 }
 
 static bool
-render_ring_get_irq(struct intel_ring_buffer *ring)
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -657,9 +657,9 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
 	if (ring->irq_refcount++ == 0) {
 		if (INTEL_INFO(dev)->gen >= 5)
 			ironlake_enable_irq(dev_priv,
-					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
+					    ring->irq_enable_mask);
 		else
-			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+			i915_enable_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock(&ring->irq_lock);
 
@@ -667,7 +667,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
 }
 
 static void
-render_ring_put_irq(struct intel_ring_buffer *ring)
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -676,10 +676,9 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
 	if (--ring->irq_refcount == 0) {
 		if (INTEL_INFO(dev)->gen >= 5)
 			ironlake_disable_irq(dev_priv,
-					     GT_USER_INTERRUPT |
-					     GT_PIPE_NOTIFY);
+					     ring->irq_enable_mask);
 		else
-			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+			i915_disable_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock(&ring->irq_lock);
 }
@@ -795,42 +794,6 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 	gen6_gt_force_wake_put(dev_priv);
 }
 
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (!dev->irq_enabled)
-		return false;
-
-	spin_lock(&ring->irq_lock);
-	if (ring->irq_refcount++ == 0) {
-		if (IS_G4X(dev))
-			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-		else
-			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-	}
-	spin_unlock(&ring->irq_lock);
-
-	return true;
-}
-
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	spin_lock(&ring->irq_lock);
-	if (--ring->irq_refcount == 0) {
-		if (IS_G4X(dev))
-			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-		else
-			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-	}
-	spin_unlock(&ring->irq_lock);
-}
-
 static int
 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
@@ -1338,14 +1301,16 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->add_request = pc_render_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
-		ring->irq_get = render_ring_get_irq;
-		ring->irq_put = render_ring_put_irq;
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
 	} else {
 		ring->add_request = render_ring_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = ring_get_seqno;
-		ring->irq_get = render_ring_get_irq;
-		ring->irq_put = render_ring_put_irq;
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
 	ring->dispatch_execbuffer = render_ring_dispatch_execbuffer;
@@ -1377,14 +1342,16 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 		ring->add_request = pc_render_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
-		ring->irq_get = render_ring_get_irq;
-		ring->irq_put = render_ring_put_irq;
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
 	} else {
 		ring->add_request = render_ring_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = ring_get_seqno;
-		ring->irq_get = render_ring_get_irq;
-		ring->irq_put = render_ring_put_irq;
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
 	ring->dispatch_execbuffer = render_ring_dispatch_execbuffer;
@@ -1451,8 +1418,12 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = bsd_ring_flush;
 		ring->add_request = ring_add_request;
 		ring->get_seqno = ring_get_seqno;
-		ring->irq_get = bsd_ring_get_irq;
-		ring->irq_put = bsd_ring_put_irq;
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+		if (IS_GEN5(dev))
+			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+		else
+			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
 		ring->dispatch_execbuffer = ring_dispatch_execbuffer;
 	}
 	ring->init = init_ring_common;