Commit ff37c05a authored by Dave Airlie

Merge tag 'drm-intel-next-2016-07-11' of git://anongit.freedesktop.org/drm-intel into drm-next

- select igt testing dependencies for CONFIG_DRM_I915_DEBUG (Chris)
- track outputs in crtc state and clean up all our ad-hoc connector/encoder
  walking in modeset code (Ville)
- demidlayer drm_device/drm_i915_private (Chris Wilson)
- thundering herd fix from Chris Wilson, with lots of help from Tvrtko Ursulin
- piles of assorted cleanups and fallout from the thundering herd fix
- documentation and more tuning for waitboosting (Chris)
- pooled EU support on bxt (Arun Siluvery)
- bxt support is no longer considered preliminary!
- ring/engine vfunc cleanup from Tvrtko
- introduce intel_wait_for_register helper (Chris)
- opregion updates (Jani Nikula)
- tuning and fixes for wait_for macros (Tvrtko & Imre)
- more kabylake pci ids (Rodrigo)
- pps cleanup and fixes for bxt (Imre)
- move sink crc support over to atomic state (Maarten)
- fix up async fbdev init ordering (Chris)
- fbc fixes from Paulo and Chris

* tag 'drm-intel-next-2016-07-11' of git://anongit.freedesktop.org/drm-intel: (223 commits)
  drm/i915: Update DRIVER_DATE to 20160711
  drm/i915: Select DRM_VGEM for igt
  drm/i915: Select X86_MSR for igt
  drm/i915: Fill unused GGTT with scratch pages for VT-d
  drm/i915: Introduce Kabypoint PCH for Kabylake H/DT.
  drm/i915:gen9: implement WaMediaPoolStateCmdInWABB
  drm/i915: Check for invalid cloning earlier during modeset
  drm/i915: Simplify hdmi_12bpc_possible()
  drm/i915: Kill has_dsi_encoder
  drm/i915: s/INTEL_OUTPUT_DISPLAYPORT/INTEL_OUTPUT_DP/
  drm/i915: Replace some open coded intel_crtc_has_dp_encoder()s
  drm/i915: Kill has_dp_encoder from pipe_config
  drm/i915: Replace manual lvds and sdvo/hdmi counting with intel_crtc_has_type()
  drm/i915: Unify intel_pipe_has_type() and intel_pipe_will_have_type()
  drm/i915: Add output_types bitmask into the crtc state
  drm/i915: Remove encoder type checks from MST suspend/resume
  drm/i915: Don't mark eDP encoders as MST capable
  drm/i915: avoid wait_for_atomic() in non-atomic host2guc_action()
  drm/i915: Group the irq breadcrumb variables into the same cacheline
  drm/i915: Wake up the bottom-half if we steal their interrupt
  ...
parents 6c181c82 0b2c0582
@@ -18,6 +18,9 @@ config DRM_I915_WERROR
 config DRM_I915_DEBUG
 	bool "Enable additional driver debugging"
 	depends on DRM_I915
+	select PREEMPT_COUNT
+	select X86_MSR # used by igt/pm_rpm
+	select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
 	default n
 	help
 	  Choose this option to turn on extra driver debugging that may affect
...
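As a usage note: Kconfig 'select' pulls the listed symbols in automatically, so a kernel built with the debug option ends up with all of the igt dependencies enabled. An illustrative .config fragment under that assumption (not part of the patch):

	CONFIG_DRM_I915=y
	CONFIG_DRM_I915_DEBUG=y
	# auto-selected by DRM_I915_DEBUG:
	CONFIG_PREEMPT_COUNT=y
	CONFIG_X86_MSR=y
	CONFIG_DRM_VGEM=y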
@@ -10,9 +10,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
 i915-y := i915_drv.o \
 	  i915_irq.o \
 	  i915_params.o \
+	  i915_pci.o \
 	  i915_suspend.o \
 	  i915_sysfs.o \
 	  intel_csr.o \
+	  intel_device_info.o \
 	  intel_pm.o \
 	  intel_runtime_pm.o
@@ -37,6 +39,7 @@ i915-y += i915_cmd_parser.o \
 	  i915_gem_userptr.o \
 	  i915_gpu_error.o \
 	  i915_trace_points.o \
+	  intel_breadcrumbs.o \
 	  intel_lrc.o \
 	  intel_mocs.o \
 	  intel_ringbuffer.o \
@@ -101,9 +104,6 @@ i915-y += dvo_ch7017.o \
 # virtual gpu code
 i915-y += i915_vgpu.o

-# legacy horrors
-i915-y += i915_dma.o
-
 ifeq ($(CONFIG_DRM_I915_GVT),y)
 i915-y += intel_gvt.o
 include $(src)/gvt/Makefile
...
@@ -154,7 +154,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
 	int i;

-	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	trace_i915_context_free(ctx);

 	/*
@@ -250,7 +250,7 @@ static struct i915_gem_context *
 __create_hw_context(struct drm_device *dev,
 		    struct drm_i915_file_private *file_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_gem_context *ctx;
 	int ret;
@@ -268,6 +268,8 @@ __create_hw_context(struct drm_device *dev,
 	list_add_tail(&ctx->link, &dev_priv->context_list);
 	ctx->i915 = dev_priv;

+	ctx->ggtt_alignment = get_context_alignment(dev_priv);
+
 	if (dev_priv->hw_context_size) {
 		struct drm_i915_gem_object *obj =
 				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
@@ -394,7 +396,7 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
 void i915_gem_context_reset(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	lockdep_assert_held(&dev->struct_mutex);
@@ -410,7 +412,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 int i915_gem_context_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_gem_context *ctx;

 	/* Init should only be called once per module load. Eventually the
@@ -451,26 +453,6 @@ int i915_gem_context_init(struct drm_device *dev)
 		return PTR_ERR(ctx);
 	}

-	if (!i915.enable_execlists && ctx->engine[RCS].state) {
-		int ret;
-
-		/* We may need to do things with the shrinker which
-		 * require us to immediately switch back to the default
-		 * context. This can cause a problem as pinning the
-		 * default context also requires GTT space which may not
-		 * be available. To avoid this we always pin the default
-		 * context.
-		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state,
-					    get_context_alignment(dev_priv), 0);
-		if (ret) {
-			DRM_ERROR("Failed to pinned default global context (error %d)\n",
-				  ret);
-			i915_gem_context_unreference(ctx);
-			return ret;
-		}
-	}
-
 	dev_priv->kernel_context = ctx;

 	DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -483,33 +465,45 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;

-	lockdep_assert_held(&dev_priv->dev->struct_mutex);
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);

 	for_each_engine(engine, dev_priv) {
 		if (engine->last_context) {
 			i915_gem_context_unpin(engine->last_context, engine);
 			engine->last_context = NULL;
 		}
-
-		/* Force the GPU state to be reinitialised on enabling */
-		dev_priv->kernel_context->engine[engine->id].initialised =
-			engine->init_context == NULL;
 	}

-	/* Force the GPU state to be reinitialised on enabling */
-	dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
+	/* Force the GPU state to be restored on enabling */
+	if (!i915.enable_execlists) {
+		struct i915_gem_context *ctx;
+
+		list_for_each_entry(ctx, &dev_priv->context_list, link) {
+			if (!i915_gem_context_is_default(ctx))
+				continue;
+
+			for_each_engine(engine, dev_priv)
+				ctx->engine[engine->id].initialised = false;
+
+			ctx->remap_slice = ALL_L3_SLICES(dev_priv);
+		}
+
+		for_each_engine(engine, dev_priv) {
+			struct intel_context *kce =
+				&dev_priv->kernel_context->engine[engine->id];
+
+			kce->initialised = true;
+		}
+	}
 }

 void i915_gem_context_fini(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_gem_context *dctx = dev_priv->kernel_context;

 	lockdep_assert_held(&dev->struct_mutex);

-	if (!i915.enable_execlists && dctx->engine[RCS].state)
-		i915_gem_object_ggtt_unpin(dctx->engine[RCS].state);
-
 	i915_gem_context_unreference(dctx);
 	dev_priv->kernel_context = NULL;
@@ -759,7 +753,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	/* Trying to pin first makes error handling easier. */
 	ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
-				    get_context_alignment(engine->i915),
+				    to->ggtt_alignment,
 				    0);
 	if (ret)
 		return ret;
@@ -901,7 +895,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 	struct intel_engine_cs *engine = req->engine;

 	WARN_ON(i915.enable_execlists);
-	lockdep_assert_held(&req->i915->dev->struct_mutex);
+	lockdep_assert_held(&req->i915->drm.struct_mutex);

 	if (!req->ctx->engine[engine->id].state) {
 		struct i915_gem_context *to = req->ctx;
@@ -1032,6 +1026,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		else
 			args->value = to_i915(dev)->ggtt.base.total;
 		break;
+	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+		args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -1077,6 +1074,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
 		}
 		break;
+	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+		if (args->size) {
+			ret = -EINVAL;
+		} else {
+			if (args->value)
+				ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
+			else
+				ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
+		}
+		break;
 	default:
 		ret = -EINVAL;
 		break;
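For reference, the new context parameter is driven from userspace through the existing context-param ioctl. A minimal sketch (the fd, context id, and exact libdrm include path are assumptions; error handling is left to the caller):

	#include <sys/ioctl.h>
	#include <i915_drm.h>	/* via the libdrm include path */

	/* Sketch: disable GPU error-state capture for one GEM context.
	 * 'fd' is an open DRM device and 'ctx_id' an existing context,
	 * both assumed to have been set up elsewhere. */
	static int context_set_no_error_capture(int fd, __u32 ctx_id, int enable)
	{
		struct drm_i915_gem_context_param p = {
			.ctx_id = ctx_id,
			.param = I915_CONTEXT_PARAM_NO_ERROR_CAPTURE,
			.value = enable,	/* .size must stay 0, else -EINVAL */
		};

		return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
	}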
@@ -1089,7 +1096,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
 				       void *data, struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_reset_stats *args = data;
 	struct i915_ctx_hang_stats *hs;
 	struct i915_gem_context *ctx;
...
@@ -33,6 +33,37 @@
 #include "intel_drv.h"
 #include "i915_trace.h"

+static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	if (i915.enable_execlists)
+		return 0;
+
+	for_each_engine(engine, dev_priv) {
+		struct drm_i915_gem_request *req;
+		int ret;
+
+		if (engine->last_context == NULL)
+			continue;
+
+		if (engine->last_context == dev_priv->kernel_context)
+			continue;
+
+		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+
+		ret = i915_switch_context(req);
+		i915_add_request_no_flush(req);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
@@ -150,11 +181,19 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	/* Only idle the GPU and repeat the search once */
 	if (pass++ == 0) {
-		ret = i915_gpu_idle(dev);
+		struct drm_i915_private *dev_priv = to_i915(dev);
+
+		if (i915_is_ggtt(vm)) {
+			ret = switch_to_pinned_context(dev_priv);
+			if (ret)
+				return ret;
+		}
+
+		ret = i915_gem_wait_for_idle(dev_priv);
 		if (ret)
 			return ret;

-		i915_gem_retire_requests(to_i915(dev));
+		i915_gem_retire_requests(dev_priv);
 		goto search_again;
 	}
@@ -261,11 +300,19 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 	trace_i915_gem_evict_vm(vm);

 	if (do_idle) {
-		ret = i915_gpu_idle(vm->dev);
+		struct drm_i915_private *dev_priv = to_i915(vm->dev);
+
+		if (i915_is_ggtt(vm)) {
+			ret = switch_to_pinned_context(dev_priv);
+			if (ret)
+				return ret;
+		}
+
+		ret = i915_gem_wait_for_idle(dev_priv);
 		if (ret)
 			return ret;

-		i915_gem_retire_requests(to_i915(vm->dev));
+		i915_gem_retire_requests(dev_priv);
 		WARN_ON(!list_empty(&vm->active_list));
 	}
...
@@ -1142,7 +1142,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 			    struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret, i;

 	if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
@@ -1225,7 +1225,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 {
 	struct drm_device *dev = params->dev;
 	struct intel_engine_cs *engine = params->engine;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u64 exec_start, exec_len;
 	int instp_mode;
 	u32 instp_mask;
@@ -1328,10 +1328,10 @@ gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
 	/* Check whether the file_priv has already selected one ring. */
 	if ((int)file_priv->bsd_ring < 0) {
 		/* If not, use the ping-pong mechanism to select one. */
-		mutex_lock(&dev_priv->dev->struct_mutex);
+		mutex_lock(&dev_priv->drm.struct_mutex);
 		file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
 		dev_priv->mm.bsd_ring_dispatch_index ^= 1;
-		mutex_unlock(&dev_priv->dev->struct_mutex);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
 	}

 	return file_priv->bsd_ring;
@@ -1477,6 +1477,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		dispatch_flags |= I915_DISPATCH_RS;
 	}

+	/* Take a local wakeref for preparing to dispatch the execbuf as
+	 * we expect to access the hardware fairly frequently in the
+	 * process. Upon first dispatch, we acquire another prolonged
+	 * wakeref that we hold until the GPU has been idle for at least
+	 * 100ms.
+	 */
 	intel_runtime_pm_get(dev_priv);

 	ret = i915_mutex_lock_interruptible(dev);
...
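The comment added above documents the wakeref discipline; the underlying pattern is the usual get/put pairing around hardware access. A rough sketch, with do_hw_work() standing in as a hypothetical placeholder rather than driver code:

	/* Sketch only: bracket every span of hardware access with a
	 * runtime-PM reference so the device cannot suspend mid-access. */
	intel_runtime_pm_get(dev_priv);		/* wake device, hold a wakeref */
	ret = do_hw_work(dev_priv);		/* hypothetical register access */
	intel_runtime_pm_put(dev_priv);		/* drop wakeref, allow suspend */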
@@ -58,7 +58,7 @@
 static void i965_write_fence_reg(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_reg_t fence_reg_lo, fence_reg_hi;
 	int fence_pitch_shift;
@@ -117,7 +117,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val;

 	if (obj) {
@@ -156,7 +156,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 static void i830_write_fence_reg(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t val;

 	if (obj) {
@@ -193,7 +193,7 @@ inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	/* Ensure that all CPU reads are completed before installing a fence
 	 * and all writes before removing the fence.
@@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	int reg = fence_number(dev_priv, fence);

 	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
@@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct drm_i915_fence_reg *fence;
 	int ret;
@@ -311,7 +311,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 static struct drm_i915_fence_reg *
 i915_find_fence_reg(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_fence_reg *reg, *avail;
 	int i;
@@ -367,7 +367,7 @@ int
 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool enable = obj->tiling_mode != I915_TILING_NONE;
 	struct drm_i915_fence_reg *reg;
 	int ret;
@@ -433,7 +433,7 @@ bool
 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);

 		WARN_ON(!ggtt_vma ||
@@ -457,7 +457,7 @@ void
 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
 		dev_priv->fence_regs[obj->fence_reg].pin_count--;
 	}
@@ -472,7 +472,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
  */
 void i915_gem_restore_fences(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int i;

 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
@@ -549,7 +549,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
 void
 i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
...
@@ -390,27 +390,27 @@ struct i915_hw_ppgtt {
 	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };

-/* For each pde iterates over every pde between from start until start + length.
- * If start, and start+length are not perfectly divisible, the macro will round
- * down, and up as needed. The macro modifies pde, start, and length. Dev is
- * only used to differentiate shift values. Temp is temp. On gen6/7, start = 0,
- * and length = 2G effectively iterates over every PDE in the system.
- *
- * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
+/*
+ * gen6_for_each_pde() iterates over every pde from start until start+length.
+ * If start and start+length are not perfectly divisible, the macro will round
+ * down and up as needed. Start=0 and length=2G effectively iterates over
+ * every PDE in the system. The macro modifies ALL its parameters except 'pd',
+ * so each of the other parameters should preferably be a simple variable, or
+ * at most an lvalue with no side-effects!
  */
-#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
-	for (iter = gen6_pde_index(start); \
-	     length > 0 && iter < I915_PDES ? \
-			(pt = (pd)->page_table[iter]), 1 : 0; \
-	     iter++, \
-	     temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
-	     temp = min_t(unsigned, temp, length), \
-	     start += temp, length -= temp)
+#define gen6_for_each_pde(pt, pd, start, length, iter) \
+	for (iter = gen6_pde_index(start); \
+	     length > 0 && iter < I915_PDES && \
+		(pt = (pd)->page_table[iter], true); \
+	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
+		temp = min(temp - start, length); \
+		start += temp, length -= temp; }), ++iter)

-#define gen6_for_all_pdes(pt, ppgtt, iter)  \
-	for (iter = 0;		\
-	     pt = ppgtt->pd.page_table[iter], iter < I915_PDES;	\
-	     iter++)
+#define gen6_for_all_pdes(pt, pd, iter) \
+	for (iter = 0; \
+	     iter < I915_PDES && \
+		(pt = (pd)->page_table[iter], true); \
+	     ++iter)

 static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
 {
...
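With the rewrite, the macro advances start, length and the iterator itself, so callers drop the old 'temp' argument and pass plain locals. A minimal caller sketch (do_something() and the local names are illustrative, not driver code):

	struct i915_page_table *pt;
	uint32_t pde, start = from, length = len;	/* from/len: caller-supplied range */

	/* The macro modifies everything except 'pd', so only pass
	 * simple variables without side effects. */
	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
		do_something(pt, pde);	/* hypothetical per-PDE work */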