Commit adc31849 authored by Dave Airlie

Merge tag 'drm-intel-next-2014-12-19' of git://anongit.freedesktop.org/drm-intel into drm-next

- plane handling refactoring from Matt Roper and Gustavo Padovan in prep for
  atomic updates
- fixes and more patches for the seqno to request transformation from John
- docbook for fbc from Rodrigo
- prep work for dual-link dsi from Gaurav Singh
- crc fixes from Ville
- special ggtt views infrastructure from Tvrtko Ursulin
- shadow patch copying for the cmd parser from Brad Volkin
- execlist and full ppgtt by default on gen8, for testing for now

* tag 'drm-intel-next-2014-12-19' of git://anongit.freedesktop.org/drm-intel: (131 commits)
  drm/i915: Update DRIVER_DATE to 20141219
  drm/i915: Hold runtime PM during plane commit
  drm/i915: Organize bind_vma funcs
  drm/i915: Organize INSTDONE report for future.
  drm/i915: Organize PDP regs report for future.
  drm/i915: Organize PPGTT init
  drm/i915: Organize Fence registers for future enablement.
  drm/i915: tame the chattermouth (v2)
  drm/i915: Warn about missing context state workarounds only once
  drm/i915: Use true PPGTT in Gen8+ when execlists are enabled
  drm/i915: Skip gunit save/restore for cherryview
  drm/i915/chv: Use timeout mode for RC6 on chv
  drm/i915: Add GPGPU_THREADS_DISPATCHED to the register whitelist
  drm/i915: Tidy up execbuffer command parsing code
  drm/i915: Mark shadow batch buffers as purgeable
  drm/i915: Use batch length instead of object size in command parser
  drm/i915: Use batch pools with the command parser
  drm/i915: Implement a framework for batch buffer pools
  drm/i915: fix use after free during eDP encoder destroying
  drm/i915/skl: Skylake also supports DP MST
  ...
parents c93546a5 0e2cfc00
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
@@ -4035,6 +4035,11 @@ int num_ioctls;</synopsis>
         <title>Panel Self Refresh PSR (PSR/SRD)</title>
 !Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
 !Idrivers/gpu/drm/i915/intel_psr.c
+      </sect2>
+      <sect2>
+        <title>Frame Buffer Compression (FBC)</title>
+!Pdrivers/gpu/drm/i915/intel_fbc.c Frame Buffer Compression (FBC)
+!Idrivers/gpu/drm/i915/intel_fbc.c
       </sect2>
       <sect2>
         <title>DPIO</title>
@@ -4138,12 +4143,22 @@ int num_ioctls;</synopsis>
         <title>Batchbuffer Parsing</title>
 !Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
 !Idrivers/gpu/drm/i915/i915_cmd_parser.c
+      </sect2>
+      <sect2>
+        <title>Batchbuffer Pools</title>
+!Pdrivers/gpu/drm/i915/i915_gem_batch_pool.c batch pool
+!Idrivers/gpu/drm/i915/i915_gem_batch_pool.c
       </sect2>
       <sect2>
         <title>Logical Rings, Logical Ring Contexts and Execlists</title>
 !Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
 !Idrivers/gpu/drm/i915/intel_lrc.c
       </sect2>
+      <sect2>
+        <title>Global GTT views</title>
+!Pdrivers/gpu/drm/i915/i915_gem_gtt.c Global GTT views
+!Idrivers/gpu/drm/i915/i915_gem_gtt.c
+      </sect2>
     </sect1>
   <sect1>
...
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
@@ -2658,6 +2658,27 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
 }
 EXPORT_SYMBOL(drm_mode_set_config_internal);
 
+/**
+ * drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
+ * @mode: mode to query
+ * @hdisplay: hdisplay value to fill in
+ * @vdisplay: vdisplay value to fill in
+ *
+ * The vdisplay value will be doubled if the specified mode is a stereo mode of
+ * the appropriate layout.
+ */
+void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+			    int *hdisplay, int *vdisplay)
+{
+	struct drm_display_mode adjusted;
+
+	drm_mode_copy(&adjusted, mode);
+	drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
+	*hdisplay = adjusted.crtc_hdisplay;
+	*vdisplay = adjusted.crtc_vdisplay;
+}
+EXPORT_SYMBOL(drm_crtc_get_hv_timing);
+
 /**
  * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
  * CRTC viewport
@@ -2675,16 +2696,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
 {
 	int hdisplay, vdisplay;
 
-	hdisplay = mode->hdisplay;
-	vdisplay = mode->vdisplay;
-
-	if (drm_mode_is_stereo(mode)) {
-		struct drm_display_mode adjusted = *mode;
-
-		drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
-		hdisplay = adjusted.crtc_hdisplay;
-		vdisplay = adjusted.crtc_vdisplay;
-	}
+	drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
 
 	if (crtc->invert_dimensions)
 		swap(hdisplay, vdisplay);
...
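The new helper centralizes the stereo-doubling logic that drm_crtc_check_viewport() used to open-code. For orientation, here is a hedged sketch of a caller; check_fb_covers_mode() is an invented name, not part of this series:

/* Illustrative sketch only: reject a framebuffer smaller than the mode's
 * active area; drm_crtc_get_hv_timing() transparently doubles vdisplay
 * for frame-packed stereo modes. check_fb_covers_mode() is a made-up
 * helper name.
 */
static int check_fb_covers_mode(const struct drm_display_mode *mode,
				const struct drm_framebuffer *fb)
{
	int hdisplay, vdisplay;

	drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);

	if (fb->width < hdisplay || fb->height < vdisplay)
		return -ENOSPC;

	return 0;
}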
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
@@ -739,6 +739,8 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
  * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
  *   buffers containing two eyes (only adjust the timings when needed, eg. for
  *   "frame packing" or "side by side full").
+ * - The CRTC_NO_DBLSCAN and CRTC_NO_VSCAN flags request that adjustment *not*
+ *   be performed for doublescan and vscan > 1 modes respectively.
  */
 void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
 {
@@ -765,19 +767,23 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
 		}
 	}
 
-	if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
-		p->crtc_vdisplay *= 2;
-		p->crtc_vsync_start *= 2;
-		p->crtc_vsync_end *= 2;
-		p->crtc_vtotal *= 2;
+	if (!(adjust_flags & CRTC_NO_DBLSCAN)) {
+		if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+			p->crtc_vdisplay *= 2;
+			p->crtc_vsync_start *= 2;
+			p->crtc_vsync_end *= 2;
+			p->crtc_vtotal *= 2;
+		}
 	}
 
-	if (p->vscan > 1) {
-		p->crtc_vdisplay *= p->vscan;
-		p->crtc_vsync_start *= p->vscan;
-		p->crtc_vsync_end *= p->vscan;
-		p->crtc_vtotal *= p->vscan;
+	if (!(adjust_flags & CRTC_NO_VSCAN)) {
+		if (p->vscan > 1) {
+			p->crtc_vdisplay *= p->vscan;
+			p->crtc_vsync_start *= p->vscan;
+			p->crtc_vsync_end *= p->vscan;
+			p->crtc_vtotal *= p->vscan;
+		}
 	}
 
 	if (adjust_flags & CRTC_STEREO_DOUBLE) {
 		unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
...
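The two new flags let a caller opt out of individual software timing adjustments. A hedged sketch of how a driver whose hardware performs doublescan/vscan line replication itself might use them; get_hw_crtc_timings() is an invented name:

/* Illustrative sketch only: compute crtc_* timings without the doublescan
 * and vscan doubling, e.g. for hardware that replicates lines on its own.
 * get_hw_crtc_timings() is a made-up helper name.
 */
static void get_hw_crtc_timings(const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted)
{
	drm_mode_copy(adjusted, mode);
	drm_mode_set_crtcinfo(adjusted, CRTC_NO_DBLSCAN | CRTC_NO_VSCAN);
}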
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
@@ -19,6 +19,7 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
 # GEM code
 i915-y += i915_cmd_parser.o \
+	  i915_gem_batch_pool.o \
 	  i915_gem_context.o \
 	  i915_gem_render_state.o \
 	  i915_gem_debug.o \
@@ -47,6 +48,7 @@ i915-y += intel_renderstate_gen6.o \
 i915-y += intel_audio.o \
 	  intel_bios.o \
 	  intel_display.o \
+	  intel_fbc.o \
 	  intel_fifo_underrun.o \
 	  intel_frontbuffer.o \
 	  intel_modes.o \
...
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -152,6 +152,7 @@ static const struct drm_i915_cmd_descriptor render_cmds[] = {
 	CMD( MI_PREDICATE,            SMI,  F, 1,    S ),
 	CMD( MI_TOPOLOGY_FILTER,      SMI,  F, 1,    S ),
 	CMD( MI_DISPLAY_FLIP,         SMI, !F, 0xFF, R ),
+	CMD( MI_SET_APPID,            SMI,  F, 1,    S ),
 	CMD( MI_SET_CONTEXT,          SMI, !F, 0xFF, R ),
 	CMD( MI_URB_CLEAR,            SMI, !F, 0xFF, S ),
 	CMD( MI_STORE_DWORD_IMM,      SMI, !F, 0x3F, B,
@@ -210,6 +211,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
 	CMD( MI_SET_PREDICATE,        SMI,  F, 1,    S ),
 	CMD( MI_RS_CONTROL,           SMI,  F, 1,    S ),
 	CMD( MI_URB_ATOMIC_ALLOC,     SMI,  F, 1,    S ),
+	CMD( MI_SET_APPID,            SMI,  F, 1,    S ),
 	CMD( MI_RS_CONTEXT,           SMI,  F, 1,    S ),
 	CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
 	CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
@@ -229,6 +231,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
 static const struct drm_i915_cmd_descriptor video_cmds[] = {
 	CMD( MI_ARB_ON_OFF,           SMI,  F, 1,    R ),
+	CMD( MI_SET_APPID,            SMI,  F, 1,    S ),
 	CMD( MI_STORE_DWORD_IMM,      SMI, !F, 0xFF, B,
 	      .bits = {{
 			.offset = 0,
@@ -272,6 +275,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
 static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
 	CMD( MI_ARB_ON_OFF,           SMI,  F, 1,    R ),
+	CMD( MI_SET_APPID,            SMI,  F, 1,    S ),
 	CMD( MI_STORE_DWORD_IMM,      SMI, !F, 0xFF, B,
 	      .bits = {{
 			.offset = 0,
@@ -401,6 +405,7 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
 #define REG64(addr) (addr), (addr + sizeof(u32))
 
 static const u32 gen7_render_regs[] = {
+	REG64(GPGPU_THREADS_DISPATCHED),
 	REG64(HS_INVOCATION_COUNT),
 	REG64(DS_INVOCATION_COUNT),
 	REG64(IA_VERTICES_COUNT),
@@ -481,13 +486,17 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
 	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
 	u32 subclient =
 		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+	u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
 
 	if (client == INSTR_MI_CLIENT)
 		return 0x3F;
 	else if (client == INSTR_RC_CLIENT) {
-		if (subclient == INSTR_MEDIA_SUBCLIENT)
-			return 0xFFF;
-		else
+		if (subclient == INSTR_MEDIA_SUBCLIENT) {
+			if (op == 6)
+				return 0xFFFF;
+			else
+				return 0xFFF;
+		} else
 			return 0xFF;
 	}
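For context, the mask returned here is applied to a command's header dword to extract its length. A hedged sketch of the decode idiom (the parser's real code works through per-command descriptors; the +2 bias matches the parser's LENGTH_BIAS, reflecting that the length field excludes the first dwords):

/* Illustrative sketch: with the change above, media-subclient commands
 * whose opcode field is 6 get a 16-bit length mask (0xFFFF) instead of
 * 12 bits. decode_cmd_length() is a made-up helper name.
 */
static u32 decode_cmd_length(u32 cmd_header, u32 length_mask)
{
	return (cmd_header & length_mask) + 2;
}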
@@ -716,14 +725,14 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
 	BUG_ON(!validate_regs_sorted(ring));
 
-	if (hash_empty(ring->cmd_hash)) {
-		ret = init_hash_table(ring, cmd_tables, cmd_table_count);
-		if (ret) {
-			DRM_ERROR("CMD: cmd_parser_init failed!\n");
-			fini_hash_table(ring);
-			return ret;
-		}
+	WARN_ON(!hash_empty(ring->cmd_hash));
+
+	ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+	if (ret) {
+		DRM_ERROR("CMD: cmd_parser_init failed!\n");
+		fini_hash_table(ring);
+		return ret;
 	}
 
 	ring->needs_cmd_parser = true;
@@ -840,6 +849,69 @@ static u32 *vmap_batch(struct drm_i915_gem_object *obj)
 	return (u32*)addr;
 }
 
+/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
+static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
+		       struct drm_i915_gem_object *src_obj,
+		       u32 batch_start_offset,
+		       u32 batch_len)
+{
+	int ret = 0;
+	int needs_clflush = 0;
+	u32 *src_base, *dest_base = NULL;
+	u32 *src_addr, *dest_addr;
+	u32 offset = batch_start_offset / sizeof(*dest_addr);
+	u32 end = batch_start_offset + batch_len;
+
+	if (end > dest_obj->base.size || end > src_obj->base.size)
+		return ERR_PTR(-E2BIG);
+
+	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
+	if (ret) {
+		DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+		return ERR_PTR(ret);
+	}
+
+	src_base = vmap_batch(src_obj);
+	if (!src_base) {
+		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
+		ret = -ENOMEM;
+		goto unpin_src;
+	}
+
+	src_addr = src_base + offset;
+
+	if (needs_clflush)
+		drm_clflush_virt_range((char *)src_addr, batch_len);
+
+	ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
+	if (ret) {
+		DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+		goto unmap_src;
+	}
+
+	dest_base = vmap_batch(dest_obj);
+	if (!dest_base) {
+		DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+		ret = -ENOMEM;
+		goto unmap_src;
+	}
+
+	dest_addr = dest_base + offset;
+
+	if (batch_start_offset != 0)
+		memset((u8 *)dest_base, 0, batch_start_offset);
+
+	memcpy(dest_addr, src_addr, batch_len);
+	memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
+
+unmap_src:
+	vunmap(src_base);
+unpin_src:
+	i915_gem_object_unpin_pages(src_obj);
+
+	return ret ? ERR_PTR(ret) : dest_base;
+}
+
 /**
  * i915_needs_cmd_parser() - should a given ring use software command parsing?
  * @ring: the ring in question
@@ -956,7 +1028,9 @@ static bool check_cmd(const struct intel_engine_cs *ring,
  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
  * @ring: the ring on which the batch is to execute
  * @batch_obj: the batch buffer in question
+ * @shadow_batch_obj: copy of the batch buffer in question
  * @batch_start_offset: byte offset in the batch at which execution starts
+ * @batch_len: length of the commands in batch_obj
  * @is_master: is the submitting process the drm master?
  *
  * Parses the specified batch buffer looking for privilege violations as
@@ -967,33 +1041,38 @@ static bool check_cmd(const struct intel_engine_cs *ring,
  */
 int i915_parse_cmds(struct intel_engine_cs *ring,
 		    struct drm_i915_gem_object *batch_obj,
+		    struct drm_i915_gem_object *shadow_batch_obj,
 		    u32 batch_start_offset,
+		    u32 batch_len,
 		    bool is_master)
 {
 	int ret = 0;
 	u32 *cmd, *batch_base, *batch_end;
 	struct drm_i915_cmd_descriptor default_desc = { 0 };
-	int needs_clflush = 0;
 	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
 
-	ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
+	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
 	if (ret) {
-		DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
-		return ret;
+		DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
+		return -1;
 	}
 
-	batch_base = vmap_batch(batch_obj);
-	if (!batch_base) {
-		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
-		i915_gem_object_unpin_pages(batch_obj);
-		return -ENOMEM;
+	batch_base = copy_batch(shadow_batch_obj, batch_obj,
+				batch_start_offset, batch_len);
+	if (IS_ERR(batch_base)) {
+		DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+		i915_gem_object_ggtt_unpin(shadow_batch_obj);
+		return PTR_ERR(batch_base);
 	}
 
-	if (needs_clflush)
-		drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
-
 	cmd = batch_base + (batch_start_offset / sizeof(*cmd));
-	batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
+
+	/*
+	 * We use the batch length as size because the shadow object is as
+	 * large or larger and copy_batch() will write MI_NOPs to the extra
+	 * space. Parsing should be faster in some cases this way.
+	 */
+	batch_end = cmd + (batch_len / sizeof(*batch_end));
 
 	while (cmd < batch_end) {
 		const struct drm_i915_cmd_descriptor *desc;
@@ -1053,8 +1132,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 	}
 
 	vunmap(batch_base);
-
-	i915_gem_object_unpin_pages(batch_obj);
+	i915_gem_object_ggtt_unpin(shadow_batch_obj);
 
 	return ret;
 }
@@ -1076,6 +1154,7 @@ int i915_cmd_parser_get_version(void)
 	 *    hardware parsing enabled (so does not allow new use cases).
 	 * 2. Allow access to the MI_PREDICATE_SRC0 and
 	 *    MI_PREDICATE_SRC1 registers.
+	 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
 	 */
-	return 2;
+	return 3;
 }
...
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
@@ -928,6 +928,7 @@ int i915_driver_unload(struct drm_device *dev)
 
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
 	i915_gem_cleanup_stolen(dev);
@@ -1004,6 +1005,13 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 	kfree(file_priv);
 }
 
+static int
+i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file)
+{
+	return -ENODEV;
+}
+
 const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
@@ -1025,8 +1033,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
...
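From userspace, the legacy pin/unpin ioctls are now hard-disabled. A hedged libdrm-style illustration, not part of this series; try_legacy_pin() is an invented name:

/* Illustrative userspace sketch: on kernels with this change, the legacy
 * pin ioctl is expected to fail with errno == ENODEV.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int try_legacy_pin(int fd, uint32_t handle)
{
	struct drm_i915_gem_pin pin;

	memset(&pin, 0, sizeof(pin));
	pin.handle = handle;
	pin.alignment = 4096;

	/* drmIoctl() returns -1 and sets errno (here ENODEV) on failure. */
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin);
}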
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
@@ -841,6 +841,8 @@ int i915_reset(struct drm_device *dev)
 			return ret;
 	}
 
+	intel_overlay_reset(dev_priv);
+
 	/* Ok, now get things going again... */
 
 	/*
@@ -1299,6 +1301,8 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
 	err = vlv_allow_gt_wake(dev_priv, false);
 	if (err)
 		goto err2;
-	vlv_save_gunit_s0ix_state(dev_priv);
+
+	if (!IS_CHERRYVIEW(dev_priv->dev))
+		vlv_save_gunit_s0ix_state(dev_priv);
 
 	err = vlv_force_gfx_clock(dev_priv, false);
@@ -1330,6 +1334,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
 	 */
 	ret = vlv_force_gfx_clock(dev_priv, true);
 
-	vlv_restore_gunit_s0ix_state(dev_priv);
+	if (!IS_CHERRYVIEW(dev_priv->dev))
+		vlv_restore_gunit_s0ix_state(dev_priv);
 
 	err = vlv_allow_gt_wake(dev_priv, true);
...
...
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
new file mode 100644
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "i915_drv.h"
/**
* DOC: batch pool
*
* In order to submit batch buffers as 'secure', the software command parser
* must ensure that a batch buffer cannot be modified after parsing. It does
* this by copying the user provided batch buffer contents to a kernel owned
* buffer from which the hardware will actually execute, and by carefully
* managing the address space bindings for such buffers.
*
* The batch pool framework provides a mechanism for the driver to manage a
* set of scratch buffers to use for this purpose. The framework can be
* extended to support other use cases should they arise.
*/
/**
* i915_gem_batch_pool_init() - initialize a batch buffer pool
* @dev: the drm device
* @pool: the batch buffer pool
*/
void i915_gem_batch_pool_init(struct drm_device *dev,
struct i915_gem_batch_pool *pool)
{
pool->dev = dev;
INIT_LIST_HEAD(&pool->cache_list);
}
/**
* i915_gem_batch_pool_fini() - clean up a batch buffer pool
* @pool: the pool to clean up
*
* Note: Callers must hold the struct_mutex.
*/
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
while (!list_empty(&pool->cache_list)) {
struct drm_i915_gem_object *obj =
list_first_entry(&pool->cache_list,
struct drm_i915_gem_object,
batch_pool_list);
WARN_ON(obj->active);
list_del_init(&obj->batch_pool_list);
drm_gem_object_unreference(&obj->base);
}
}
/**
* i915_gem_batch_pool_get() - select a buffer from the pool
* @pool: the batch buffer pool
* @size: the minimum desired size of the returned buffer
*
* Finds or allocates a batch buffer in the pool with at least the requested
* size. The caller is responsible for any domain, active/inactive, or
* purgeability management for the returned buffer.
*
* Note: Callers must hold the struct_mutex
*
* Return: the selected batch buffer object
*/
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
size_t size)
{
struct drm_i915_gem_object *obj = NULL;
struct drm_i915_gem_object *tmp, *next;
WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
list_for_each_entry_safe(tmp, next,
&pool->cache_list, batch_pool_list) {
if (tmp->active)
continue;
/* While we're looping, do some clean up */
if (tmp->madv == __I915_MADV_PURGED) {
list_del(&tmp->batch_pool_list);
drm_gem_object_unreference(&tmp->base);
continue;
}
/*
* Select a buffer that is at least as big as needed
* but not 'too much' bigger. A better way to do this
* might be to bucket the pool objects based on size.
*/
if (tmp->base.size >= size &&
tmp->base.size <= (2 * size)) {
obj = tmp;
break;
}
}
if (!obj) {
obj = i915_gem_alloc_object(pool->dev, size);
if (!obj)
return ERR_PTR(-ENOMEM);
list_add_tail(&obj->batch_pool_list, &pool->cache_list);
}
else
/* Keep list in LRU order */
list_move_tail(&obj->batch_pool_list, &pool->cache_list);
obj->madv = I915_MADV_WILLNEED;
return obj;
}
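Taken together, a hedged sketch of the pool's intended lifecycle from a caller's point of view; use_pooled_shadow() is an invented name, error handling is trimmed, and struct_mutex must be held as the kerneldoc above requires:

/* Illustrative sketch of using the batch pool: grab a scratch buffer at
 * least as large as the user batch, use it, then mark it purgeable so the
 * pool can reclaim it under memory pressure.
 */
static int use_pooled_shadow(struct drm_i915_private *dev_priv,
			     size_t batch_size)
{
	struct drm_i915_gem_object *shadow;

	shadow = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool, batch_size);
	if (IS_ERR(shadow))
		return PTR_ERR(shadow);

	/* ... copy the user batch into 'shadow' and submit it ... */

	/* Once the buffer is no longer active, marking it purgeable lets a
	 * later i915_gem_batch_pool_get() either reuse or free it. */
	shadow->madv = I915_MADV_DONTNEED;

	return 0;
}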
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -408,9 +408,20 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 
 	BUG_ON(!dev_priv->ring[RCS].default_context);
 
-	if (i915.enable_execlists)
-		return 0;
+	if (i915.enable_execlists) {
+		for_each_ring(ring, dev_priv, i) {
+			if (ring->init_context) {
+				ret = ring->init_context(ring,
+						ring->default_context);
+				if (ret) {
+					DRM_ERROR("ring init context: %d\n",
+							ret);
+					return ret;
+				}
+			}
+		}
+	} else
 		for_each_ring(ring, dev_priv, i) {
 			ret = i915_switch_context(ring, ring->default_context);
 			if (ret)
@@ -611,9 +622,14 @@ static int do_switch(struct intel_engine_cs *ring,
 			goto unpin_out;
 
 	vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
-	if (!(vma->bound & GLOBAL_BIND))
-		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
-				GLOBAL_BIND);
+	if (!(vma->bound & GLOBAL_BIND)) {
+		ret = i915_vma_bind(vma,
+				    to->legacy_hw_ctx.rcs_state->cache_level,
+				    GLOBAL_BIND);
+
+		/* This shouldn't ever fail. */
+		if (WARN_ONCE(ret, "GGTT context bind failed!"))
+			goto unpin_out;
+	}
 
 	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
@@ -651,7 +667,8 @@ static int do_switch(struct intel_engine_cs *ring,
 		 * swapped, but there is no way to do that yet.
 		 */
 		from->legacy_hw_ctx.rcs_state->dirty = 1;
-		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
+		BUG_ON(i915_gem_request_get_ring(
+			from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
 
 		/* obj is kept alive until the next request by its active ref */
 		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
@@ -671,10 +688,6 @@ static int do_switch(struct intel_engine_cs *ring,
 			if (ret)
 				DRM_ERROR("ring init context: %d\n", ret);
 		}
-
-		ret = i915_gem_render_state_init(ring);
-		if (ret)
-			DRM_ERROR("init render state: %d\n", ret);
 	}
 
 	return 0;
...
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -37,6 +37,7 @@
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 #define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
 #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+#define  __EXEC_OBJECT_PURGEABLE (1<<27)
 
 #define BATCH_OFFSET_BIAS (256*1024)
@@ -223,7 +224,12 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
 		vma->pin_count--;
 
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+	if (entry->flags & __EXEC_OBJECT_PURGEABLE)
+		obj->madv = I915_MADV_DONTNEED;
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
+			  __EXEC_OBJECT_HAS_PIN |
+			  __EXEC_OBJECT_PURGEABLE);
 }
 
 static void eb_destroy(struct eb_vmas *eb)
@@ -357,9 +363,12 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	 * through the ppgtt for non_secure batchbuffers. */
 	if (unlikely(IS_GEN6(dev) &&
 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-	    !(target_vma->bound & GLOBAL_BIND)))
-		target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
-				GLOBAL_BIND);
+	    !(target_vma->bound & GLOBAL_BIND))) {
+		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
+				    GLOBAL_BIND);
+		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
+			return ret;
+	}
 
 	/* Validate that the target is in a valid r/w GPU domain */
 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@ -943,7 +952,7 @@ void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct intel_engine_cs *ring)
 {
-	u32 seqno = intel_ring_get_seqno(ring);
+	struct drm_i915_gem_request *req = intel_ring_get_request(ring);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -960,7 +969,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		i915_vma_move_to_active(vma, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
-			obj->last_write_seqno = seqno;
+			i915_gem_request_assign(&obj->last_write_req, req);
 
 			intel_fb_obj_invalidate(obj, ring);
@@ -968,7 +977,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 		}
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			obj->last_fenced_seqno = seqno;
+			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
 				struct drm_i915_private *dev_priv = to_i915(ring->dev);
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
@@ -990,7 +999,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 	ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	(void)__i915_add_request(ring, file, obj, NULL);
+	(void)__i915_add_request(ring, file, obj);
 }
 
 static int
@@ -1060,6 +1069,65 @@ i915_emit_box(struct intel_engine_cs *ring,
 	return 0;
 }
 
+static struct drm_i915_gem_object*
+i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
+			  struct eb_vmas *eb,
+			  struct drm_i915_gem_object *batch_obj,
+			  u32 batch_start_offset,
+			  u32 batch_len,
+			  bool is_master,
+			  u32 *flags)
+{
+	struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
+	struct drm_i915_gem_object *shadow_batch_obj;
+	int ret;
+
+	shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
+						   batch_obj->base.size);
+	if (IS_ERR(shadow_batch_obj))
+		return shadow_batch_obj;
+
+	ret = i915_parse_cmds(ring,
+			      batch_obj,
+			      shadow_batch_obj,
+			      batch_start_offset,
+			      batch_len,
+			      is_master);
+	if (ret) {
+		if (ret == -EACCES)
+			return batch_obj;
+	} else {
+		struct i915_vma *vma;
+
+		memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+
+		vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+		vma->exec_entry = shadow_exec_entry;
+		vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
+		drm_gem_object_reference(&shadow_batch_obj->base);
+		list_add_tail(&vma->exec_list, &eb->vmas);
+
+		shadow_batch_obj->base.pending_read_domains =
+			batch_obj->base.pending_read_domains;
+
+		/*
+		 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+		 * bit from MI_BATCH_BUFFER_START commands issued in the
+		 * dispatch_execbuffer implementations. We specifically
+		 * don't want that set when the command parser is
+		 * enabled.
+		 *
+		 * FIXME: with aliasing ppgtt, buffers that should only
+		 * be in ggtt still end up in the aliasing ppgtt. remove
+		 * this check when that is fixed.
+		 */
+		if (USES_FULL_PPGTT(dev))
+			*flags |= I915_DISPATCH_SECURE;
+	}
+
+	return ret ? ERR_PTR(ret) : shadow_batch_obj;
+}
+
 int
 i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
@@ -1208,7 +1276,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 			return ret;
 	}
 
-	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
 
 	i915_gem_execbuffer_move_to_active(vmas, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1277,6 +1345,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
+	struct drm_i915_gem_exec_object2 shadow_exec_entry;
 	struct intel_engine_cs *ring;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
@@ -1393,28 +1462,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		ret = -EINVAL;
 		goto err;
 	}
-	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
 	if (i915_needs_cmd_parser(ring)) {
-		ret = i915_parse_cmds(ring,
-				      batch_obj,
-				      args->batch_start_offset,
-				      file->is_master);
-		if (ret) {
-			if (ret != -EACCES)
-				goto err;
-		} else {
-			/*
-			 * XXX: Actually do this when enabling batch copy...
-			 *
-			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
-			 * from MI_BATCH_BUFFER_START commands issued in the
-			 * dispatch_execbuffer implementations. We specifically don't
-			 * want that set when the command parser is enabled.
-			 */
-		}
+		batch_obj = i915_gem_execbuffer_parse(ring,
+						      &shadow_exec_entry,
+						      eb,
+						      batch_obj,
+						      args->batch_start_offset,
+						      args->batch_len,
+						      file->is_master,
+						      &flags);
+		if (IS_ERR(batch_obj)) {
+			ret = PTR_ERR(batch_obj);
+			goto err;
+		}
 	}
 
+	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again. */
...
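Several hunks above replace raw seqno bookkeeping (last_write_seqno, last_fenced_seqno) with reference-counted request pointers via i915_gem_request_assign(). That helper is added elsewhere in this series (in i915_drv.h) and is not shown in this diff; reproduced here as a sketch, so details may differ:

/* Sketch of the request-assignment idiom: take the new reference before
 * dropping the old one, so assigning a pointer to itself stays safe.
 */
static inline void
i915_gem_request_assign(struct drm_i915_gem_request **pdst,
			struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_reference(src);

	if (*pdst)
		i915_gem_request_unreference(*pdst);

	*pdst = src;
}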
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,6 +30,68 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+/**
+ * DOC: Global GTT views
+ *
+ * Background and previous state
+ *
+ * Historically objects could exist (be bound) in global GTT space only as
+ * singular instances with a view representing all of the object's backing
+ * pages in a linear fashion. This view will be called a normal view.
+ *
+ * To support multiple views of the same object, where the number of mapped
+ * pages is not equal to the backing store, or where the layout of the pages
+ * is not linear, the concept of a GGTT view was added.
+ *
+ * One example of an alternative view is a stereo display driven by a single
+ * image. In this case we would have a framebuffer looking like this
+ * (2x2 pages):
+ *
+ *    12
+ *    34
+ *
+ * Above would represent a normal GGTT view as normally mapped for GPU or CPU
+ * rendering. In contrast, fed to the display engine would be an alternative
+ * view which could look something like this:
+ *
+ *   1212
+ *   3434
+ *
+ * In this example both the size and layout of pages in the alternative view
+ * are different from the normal view.
+ *
+ * Implementation and usage
+ *
+ * GGTT views are implemented using VMAs and are distinguished via enum
+ * i915_ggtt_view_type and struct i915_ggtt_view.
+ *
+ * A new flavour of core GEM functions which work with GGTT bound objects was
+ * added with the _view suffix. They take the struct i915_ggtt_view parameter
+ * encapsulating all metadata required to implement a view.
+ *
+ * As a helper for callers which are only interested in the normal view, a
+ * globally const i915_ggtt_view_normal singleton instance exists. All old
+ * core GEM API functions, the ones not taking the view parameter, operate on,
+ * or with, the normal GGTT view.
+ *
+ * Code wanting to add or use a new GGTT view needs to:
+ *
+ * 1. Add a new enum with a suitable name.
+ * 2. Extend the metadata in the i915_ggtt_view structure if required.
+ * 3. Add support to i915_get_vma_pages().
+ *
+ * New views are required to build a scatter-gather table from within the
+ * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
+ * exists for the lifetime of a VMA.
+ *
+ * The core API is designed to have copy semantics, which means that a passed
+ * in struct i915_ggtt_view does not need to be persistent (left around after
+ * calling the core API functions).
+ *
+ */
+
+const struct i915_ggtt_view i915_ggtt_view_normal;
+
 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
 
@@ -40,8 +102,6 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 
 	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
 	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
-	if (IS_GEN8(dev))
-		has_full_ppgtt = false; /* XXX why? */
 
 	/*
 	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
@@ -72,6 +132,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 		return 0;
 	}
 
+	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+		return 2;
+	else
 		return has_aliasing_ppgtt ? 1 : 0;
 }
 
@@ -132,7 +195,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
 		pte |= GEN6_PTE_UNCACHED;
 		break;
 	default:
-		WARN_ON(1);
+		MISSING_CASE(level);
 	}
 
 	return pte;
@@ -156,7 +219,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
 		pte |= GEN6_PTE_UNCACHED;
 		break;
 	default:
-		WARN_ON(1);
+		MISSING_CASE(level);
 	}
 
 	return pte;
@@ -1102,10 +1165,8 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 	if (INTEL_INFO(dev)->gen < 8)
 		return gen6_ppgtt_init(ppgtt);
-	else if (IS_GEN8(dev) || IS_GEN9(dev))
-		return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
 	else
-		BUG();
+		return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
 }
 
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
@@ -1146,7 +1207,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 	else if (INTEL_INFO(dev)->gen >= 8)
 		gen8_ppgtt_enable(dev);
 	else
-		WARN_ON(1);
+		MISSING_CASE(INTEL_INFO(dev)->gen);
 
 	if (ppgtt) {
 		for_each_ring(ring, dev_priv, i) {
@@ -1341,9 +1402,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 		/* The bind_vma code tries to be smart about tracking mappings.
 		 * Unfortunately above, we've just wiped out the mappings
 		 * without telling our object about it. So we need to fake it.
+		 *
+		 * Bind is not expected to fail since this is only called on
+		 * resume and assumption is all requirements exist already.
 		 */
 		vma->bound &= ~GLOBAL_BIND;
-		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+		WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND));
 	}
 
@@ -1538,7 +1602,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
 			AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
 
 	BUG_ON(!i915_is_ggtt(vma->vm));
-	intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
+	intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags);
 	vma->bound = GLOBAL_BIND;
 }
 
@@ -1588,7 +1652,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
 	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
 		if (!(vma->bound & GLOBAL_BIND) ||
 		    (cache_level != obj->cache_level)) {
-			vma->vm->insert_entries(vma->vm, obj->pages,
+			vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
 						vma->node.start,
 						cache_level, flags);
 			vma->bound |= GLOBAL_BIND;
@@ -1600,7 +1664,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
 		    (cache_level != obj->cache_level))) {
 		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 		appgtt->base.insert_entries(&appgtt->base,
-					    vma->obj->pages,
+					    vma->ggtt_view.pages,
 					    vma->node.start,
 					    cache_level, flags);
 		vma->bound |= LOCAL_BIND;
@@ -2165,7 +2229,8 @@ int i915_gem_gtt_init(struct drm_device *dev)
 }
 
 static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
-					      struct i915_address_space *vm)
+					      struct i915_address_space *vm,
+					      const struct i915_ggtt_view *view)
 {
 	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 	if (vma == NULL)
@@ -2176,12 +2241,9 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&vma->exec_list);
 	vma->vm = vm;
 	vma->obj = obj;
+	vma->ggtt_view = *view;
 
-	switch (INTEL_INFO(vm->dev)->gen) {
-	case 9:
-	case 8:
-	case 7:
-	case 6:
+	if (INTEL_INFO(vm->dev)->gen >= 6) {
 		if (i915_is_ggtt(vm)) {
 			vma->unbind_vma = ggtt_unbind_vma;
 			vma->bind_vma = ggtt_bind_vma;
@@ -2189,39 +2251,73 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
 			vma->unbind_vma = ppgtt_unbind_vma;
 			vma->bind_vma = ppgtt_bind_vma;
 		}
-		break;
-	case 5:
-	case 4:
-	case 3:
-	case 2:
+	} else {
 		BUG_ON(!i915_is_ggtt(vm));
 		vma->unbind_vma = i915_ggtt_unbind_vma;
 		vma->bind_vma = i915_ggtt_bind_vma;
-		break;
-	default:
-		BUG();
 	}
 
-	/* Keep GGTT vmas first to make debug easier */
-	if (i915_is_ggtt(vm))
-		list_add(&vma->vma_link, &obj->vma_list);
-	else {
-		list_add_tail(&vma->vma_link, &obj->vma_list);
+	list_add_tail(&vma->vma_link, &obj->vma_list);
+	if (!i915_is_ggtt(vm))
 		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
-	}
 
 	return vma;
 }
 
 struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm)
+i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
+				       struct i915_address_space *vm,
+				       const struct i915_ggtt_view *view)
 {
 	struct i915_vma *vma;
 
-	vma = i915_gem_obj_to_vma(obj, vm);
+	vma = i915_gem_obj_to_vma_view(obj, vm, view);
 	if (!vma)
-		vma = __i915_gem_vma_create(obj, vm);
+		vma = __i915_gem_vma_create(obj, vm, view);
 
 	return vma;
 }
 
+static inline
+int i915_get_vma_pages(struct i915_vma *vma)
+{
+	if (vma->ggtt_view.pages)
+		return 0;
+
+	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
+		vma->ggtt_view.pages = vma->obj->pages;
+	else
+		WARN_ONCE(1, "GGTT view %u not implemented!\n",
+			  vma->ggtt_view.type);
+
+	if (!vma->ggtt_view.pages) {
+		DRM_ERROR("Failed to get pages for VMA view type %u!\n",
+			  vma->ggtt_view.type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+/**
+ * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
+ * @vma: VMA to map
+ * @cache_level: mapping cache level
+ * @flags: flags like global or local mapping
+ *
+ * DMA addresses are taken from the scatter-gather table of this object (or of
+ * this VMA in case of non-default GGTT views) and PTE entries set up.
+ * Note that DMA addresses are also the only part of the SG table we care
+ * about.
+ */
+int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+		  u32 flags)
+{
+	int ret = i915_get_vma_pages(vma);
+
+	if (ret)
+		return ret;
+
+	vma->bind_vma(vma, cache_level, flags);
+
+	return 0;
+}
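Following the three steps in the DOC comment, a hedged sketch of what wiring up an additional view type could look like; I915_GGTT_VIEW_EXAMPLE and example_remap_sg() are invented names, not part of this series:

/* Hypothetical extension sketch. Step 1: a new enum value. */
enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_EXAMPLE,		/* invented for illustration */
};

/* Invented helper that would build a remapped scatter-gather table. */
struct sg_table *example_remap_sg(struct drm_i915_gem_object *obj);

/* Step 3: teach i915_get_vma_pages() to build the view's sg_table, which
 * then lives in vma->ggtt_view.pages for the lifetime of the VMA. */
static inline
int i915_get_vma_pages(struct i915_vma *vma)
{
	if (vma->ggtt_view.pages)
		return 0;

	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
		vma->ggtt_view.pages = vma->obj->pages;
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_EXAMPLE)
		vma->ggtt_view.pages = example_remap_sg(vma->obj);
	else
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);

	if (!vma->ggtt_view.pages)
		return -EINVAL;

	return 0;
}

Step 2, extending the metadata in struct i915_ggtt_view, would only be needed if the new view takes parameters.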
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -109,7 +109,20 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 #define GEN8_PPAT_ELLC_OVERRIDE	(0<<2)
 #define GEN8_PPAT(i, x)		((uint64_t) (x) << ((i) * 8))
 
+enum i915_ggtt_view_type {
+	I915_GGTT_VIEW_NORMAL = 0,
+};
+
+struct i915_ggtt_view {
+	enum i915_ggtt_view_type type;
+
+	struct sg_table *pages;
+};
+
+extern const struct i915_ggtt_view i915_ggtt_view_normal;
+
 enum i915_cache_level;
+
 /**
  * A VMA represents a GEM BO that is bound into an address space. Therefore, a
  * VMA's presence cannot be guaranteed before binding, or after unbinding the
@@ -129,6 +142,15 @@ struct i915_vma {
 #define PTE_READ_ONLY	(1<<2)
 	unsigned int bound : 4;
 
+	/**
+	 * Support different GGTT views into the same object.
+	 * This means there can be multiple VMA mappings per object and per VM.
+	 * i915_ggtt_view_type is used to distinguish between those entries.
+	 * The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed in GEM
+	 * functions which take no GGTT view parameter.
+	 */
+	struct i915_ggtt_view ggtt_view;
+
 	/** This object's place on the active/inactive lists */
 	struct list_head mm_list;
 
@@ -146,11 +168,10 @@ struct i915_vma {
 	/**
 	 * How many users have pinned this object in GTT space. The following
-	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
-	 * (via user_pin_count), execbuffer (objects are not allowed multiple
-	 * times for the same batchbuffer), and the framebuffer code. When
-	 * switching/pageflipping, the framebuffer code has at most two buffers
-	 * pinned per crtc.
+	 * users can each hold at most one reference: pwrite/pread, execbuffer
+	 * (objects are not allowed multiple times for the same batchbuffer),
+	 * and the framebuffer code. When switching/pageflipping, the
+	 * framebuffer code has at most two buffers pinned per crtc.
 	 *
 	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
 	 * bits with absolutely no headroom. So use 4 bits. */
@@ -182,7 +203,7 @@ struct i915_address_space {
 	 * List of objects currently involved in rendering.
 	 *
 	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives. last_rendering_seqno
+	 * flushed, not necessarily primitives. last_read_req
 	 * represents when the rendering involved will be completed.
 	 *
 	 * A reference is held on the buffer while on this list.
@@ -193,7 +214,7 @@ struct i915_address_space {
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
-	 * last_rendering_seqno is 0 while an object is in this list.
+	 * last_read_req is NULL while an object is in this list.
 	 *
 	 * A reference is not held on the buffer while on this list,
 	 * as merely being GTT-bound shouldn't prevent its being
...
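A hedged usage sketch tying the view-aware lookup to i915_vma_bind(); bind_normal_view() is an invented name, and GTT node allocation plus error handling are omitted:

/* Illustrative sketch: bind an object's normal (linear) view. The view
 * struct is copied by the core, so passing the global singleton is fine.
 */
static int bind_normal_view(struct drm_i915_gem_object *obj,
			    struct i915_address_space *vm)
{
	struct i915_vma *vma;

	vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm,
						     &i915_ggtt_view_normal);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* Sets up PTEs from the view's sg_table (the object's own pages
	 * for the normal view). */
	return i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
}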
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -173,7 +173,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
 
 	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-	ret = __i915_add_request(ring, NULL, so.obj, NULL);
+	ret = __i915_add_request(ring, NULL, so.obj);
 	/* __i915_add_request moves object to inactive if it fails */
 out:
 	i915_gem_render_state_fini(&so);
...
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -399,7 +399,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		}
 
 		obj->fence_dirty =
-			obj->last_fenced_seqno ||
+			obj->last_fenced_req ||
 			obj->fence_reg != I915_FENCE_REG_NONE;
 
 		obj->tiling_mode = args->tiling_mode;
...
@@ -670,8 +670,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->size = obj->base.size;
 	err->name = obj->base.name;
-	err->rseqno = obj->last_read_seqno;
-	err->wseqno = obj->last_write_seqno;
+	err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
+	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
 	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->base.read_domains;
 	err->write_domain = obj->base.write_domain;
@@ -679,13 +679,12 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->pinned = 0;
 	if (i915_gem_obj_is_pinned(obj))
 		err->pinned = 1;
-	if (obj->user_pin_count > 0)
-		err->pinned = -1;
 	err->tiling = obj->tiling_mode;
 	err->dirty = obj->dirty;
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
-	err->ring = obj->ring ? obj->ring->id : -1;
+	err->ring = obj->last_read_req ?
+			i915_gem_request_get_ring(obj->last_read_req)->id : -1;
 	err->cache_level = obj->cache_level;
 }
@@ -719,10 +718,8 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
 			break;
 		list_for_each_entry(vma, &obj->vma_list, vma_link)
-			if (vma->vm == vm && vma->pin_count > 0) {
+			if (vma->vm == vm && vma->pin_count > 0)
 				capture_bo(err++, vma);
-				break;
-			}
 	}
 	return err - first;
@@ -767,32 +764,21 @@ static void i915_gem_record_fences(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
-	/* Fences */
-	switch (INTEL_INFO(dev)->gen) {
-	case 9:
-	case 8:
-	case 7:
-	case 6:
-		for (i = 0; i < dev_priv->num_fence_regs; i++)
-			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
-		break;
-	case 5:
-	case 4:
-		for (i = 0; i < 16; i++)
-			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
-		break;
-	case 3:
-		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-			for (i = 0; i < 8; i++)
-				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
-	case 2:
-		for (i = 0; i < 8; i++)
-			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-		break;
-	default:
-		BUG();
-	}
+	if (IS_GEN3(dev) || IS_GEN2(dev)) {
+		for (i = 0; i < 8; i++)
+			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+			for (i = 0; i < 8; i++)
+				error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
							      (i * 4));
+	} else if (IS_GEN5(dev) || IS_GEN4(dev))
+		for (i = 0; i < 16; i++)
+			error->fence[i] = I915_READ64(FENCE_REG_965_0 +
						      (i * 8));
+	else if (INTEL_INFO(dev)->gen >= 6)
+		for (i = 0; i < dev_priv->num_fence_regs; i++)
+			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 +
						      (i * 8));
 }
@@ -926,9 +912,13 @@ static void i915_record_ring_state(struct drm_device *dev,
 		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
-		switch (INTEL_INFO(dev)->gen) {
-		case 9:
-		case 8:
+		if (IS_GEN6(dev))
+			ering->vm_info.pp_dir_base =
+				I915_READ(RING_PP_DIR_BASE_READ(ring));
+		else if (IS_GEN7(dev))
+			ering->vm_info.pp_dir_base =
+				I915_READ(RING_PP_DIR_BASE(ring));
+		else if (INTEL_INFO(dev)->gen >= 8)
 			for (i = 0; i < 4; i++) {
 				ering->vm_info.pdp[i] =
 					I915_READ(GEN8_RING_PDP_UDW(ring, i));
@@ -936,16 +926,6 @@ static void i915_record_ring_state(struct drm_device *dev,
 				ering->vm_info.pdp[i] |=
 					I915_READ(GEN8_RING_PDP_LDW(ring, i));
 			}
-			break;
-		case 7:
-			ering->vm_info.pp_dir_base =
-				I915_READ(RING_PP_DIR_BASE(ring));
-			break;
-		case 6:
-			ering->vm_info.pp_dir_base =
-				I915_READ(RING_PP_DIR_BASE_READ(ring));
-			break;
-		}
 	}
 }
@@ -1097,10 +1077,8 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		list_for_each_entry(vma, &obj->vma_list, vma_link)
-			if (vma->vm == vm && vma->pin_count > 0) {
+			if (vma->vm == vm && vma->pin_count > 0)
 				i++;
-				break;
-			}
 	}
 	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
@@ -1378,26 +1356,15 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
-	switch (INTEL_INFO(dev)->gen) {
-	case 2:
-	case 3:
+	if (IS_GEN2(dev) || IS_GEN3(dev))
 		instdone[0] = I915_READ(INSTDONE);
-		break;
-	case 4:
-	case 5:
-	case 6:
+	else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
 		instdone[0] = I915_READ(INSTDONE_I965);
 		instdone[1] = I915_READ(INSTDONE1);
-		break;
-	default:
-		WARN_ONCE(1, "Unsupported platform\n");
-	case 7:
-	case 8:
-	case 9:
+	} else if (INTEL_INFO(dev)->gen >= 7) {
 		instdone[0] = I915_READ(GEN7_INSTDONE_1);
 		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
 		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
 		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
-		break;
 	}
 }
@@ -183,6 +183,8 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
 {
 	assert_spin_locked(&dev_priv->irq_lock);
+	WARN_ON(enabled_irq_mask & ~interrupt_mask);
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 		return;
@@ -229,6 +231,8 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 {
 	uint32_t new_val;
+	WARN_ON(enabled_irq_mask & ~interrupt_mask);
 	assert_spin_locked(&dev_priv->irq_lock);
 	new_val = dev_priv->pm_irq_mask;
@@ -332,6 +336,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 	sdeimr &= ~interrupt_mask;
 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
+	WARN_ON(enabled_irq_mask & ~interrupt_mask);
 	assert_spin_locked(&dev_priv->irq_lock);
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1017,7 +1023,7 @@ static void notify_ring(struct drm_device *dev,
 	if (!intel_ring_initialized(ring))
 		return;
-	trace_i915_gem_request_complete(ring);
+	trace_i915_gem_request_notify(ring);
 	wake_up_all(&ring->irq_queue);
 }
@@ -1383,14 +1389,14 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 		if (rcs & GT_RENDER_USER_INTERRUPT)
 			notify_ring(dev, ring);
 		if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
-			intel_execlists_handle_ctx_events(ring);
+			intel_lrc_irq_handler(ring);
 		bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
 		ring = &dev_priv->ring[BCS];
 		if (bcs & GT_RENDER_USER_INTERRUPT)
 			notify_ring(dev, ring);
 		if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
-			intel_execlists_handle_ctx_events(ring);
+			intel_lrc_irq_handler(ring);
 	} else
 		DRM_ERROR("The master control interrupt lied (GT0)!\n");
 }
@@ -1406,14 +1412,14 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 		if (vcs & GT_RENDER_USER_INTERRUPT)
 			notify_ring(dev, ring);
 		if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-			intel_execlists_handle_ctx_events(ring);
+			intel_lrc_irq_handler(ring);
 		vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
 		ring = &dev_priv->ring[VCS2];
 		if (vcs & GT_RENDER_USER_INTERRUPT)
 			notify_ring(dev, ring);
 		if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-			intel_execlists_handle_ctx_events(ring);
+			intel_lrc_irq_handler(ring);
 	} else
 		DRM_ERROR("The master control interrupt lied (GT1)!\n");
 }
@@ -1440,7 +1446,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 		if (vcs & GT_RENDER_USER_INTERRUPT)
 			notify_ring(dev, ring);
 		if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-			intel_execlists_handle_ctx_events(ring);
+			intel_lrc_irq_handler(ring);
 	} else
 		DRM_ERROR("The master control interrupt lied (GT3)!\n");
 }
@@ -2753,18 +2759,18 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
-static u32
-ring_last_seqno(struct intel_engine_cs *ring)
+static struct drm_i915_gem_request *
+ring_last_request(struct intel_engine_cs *ring)
 {
 	return list_entry(ring->request_list.prev,
-			  struct drm_i915_gem_request, list)->seqno;
+			  struct drm_i915_gem_request, list);
 }
 static bool
-ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *ring)
 {
 	return (list_empty(&ring->request_list) ||
-		i915_seqno_passed(seqno, ring_last_seqno(ring)));
+		i915_gem_request_completed(ring_last_request(ring), false));
 }
 static bool
@@ -2984,7 +2990,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
 		acthd = intel_ring_get_active_head(ring);
 		if (ring->hangcheck.seqno == seqno) {
-			if (ring_idle(ring, seqno)) {
+			if (ring_idle(ring)) {
 				ring->hangcheck.action = HANGCHECK_IDLE;
 				if (waitqueue_active(&ring->irq_queue)) {
...
@@ -35,7 +35,7 @@ struct i915_params i915 __read_mostly = {
 	.vbt_sdvo_panel_type = -1,
 	.enable_rc6 = -1,
 	.enable_fbc = -1,
-	.enable_execlists = 0,
+	.enable_execlists = -1,
 	.enable_hangcheck = true,
 	.enable_ppgtt = -1,
 	.enable_psr = 0,
@@ -51,6 +51,7 @@ struct i915_params i915 __read_mostly = {
 	.disable_vtd_wa = 0,
 	.use_mmio_flip = 0,
 	.mmio_debug = 0,
+	.verbose_state_checks = 1,
 };
 module_param_named(modeset, i915.modeset, int, 0400);
@@ -122,7 +123,7 @@ MODULE_PARM_DESC(enable_ppgtt,
 module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
 MODULE_PARM_DESC(enable_execlists,
 	"Override execlists usage. "
-	"(-1=auto, 0=disabled [default], 1=enabled)");
+	"(-1=auto [default], 0=disabled, 1=enabled)");
 module_param_named(enable_psr, i915.enable_psr, int, 0600);
 MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
@@ -173,3 +174,7 @@ module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
 MODULE_PARM_DESC(mmio_debug,
 	"Enable the MMIO debug code (default: false). This may negatively "
 	"affect performance.");
+module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
+MODULE_PARM_DESC(verbose_state_checks,
+	"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
@@ -264,7 +264,7 @@ static void i915_restore_display(struct drm_device *dev)
 	}
 	/* only restore FBC info on the platform that supports FBC*/
-	intel_disable_fbc(dev);
+	intel_fbc_disable(dev);
 	/* restore FBC interval */
 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
...
@@ -328,8 +328,8 @@ TRACE_EVENT(i915_gem_evict_vm,
 TRACE_EVENT(i915_gem_ring_sync_to,
 	    TP_PROTO(struct intel_engine_cs *from,
 		     struct intel_engine_cs *to,
-		     u32 seqno),
-	    TP_ARGS(from, to, seqno),
+		     struct drm_i915_gem_request *req),
+	    TP_ARGS(from, to, req),
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -342,7 +342,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 			   __entry->dev = from->dev->primary->index;
 			   __entry->sync_from = from->id;
 			   __entry->sync_to = to->id;
-			   __entry->seqno = seqno;
+			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   ),
 	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -352,8 +352,8 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 );
 TRACE_EVENT(i915_gem_ring_dispatch,
-	    TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
-	    TP_ARGS(ring, seqno, flags),
+	    TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
+	    TP_ARGS(req, flags),
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -363,11 +363,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 			     ),
 	    TP_fast_assign(
+			   struct intel_engine_cs *ring =
+						i915_gem_request_get_ring(req);
 			   __entry->dev = ring->dev->primary->index;
 			   __entry->ring = ring->id;
-			   __entry->seqno = seqno;
+			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   __entry->flags = flags;
-			   i915_trace_irq_get(ring, seqno);
+			   i915_trace_irq_get(ring, req);
 			   ),
 	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -398,31 +400,36 @@ TRACE_EVENT(i915_gem_ring_flush,
 );
 DECLARE_EVENT_CLASS(i915_gem_request,
-	    TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
-	    TP_ARGS(ring, seqno),
+	    TP_PROTO(struct drm_i915_gem_request *req),
+	    TP_ARGS(req),
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
 			     __field(u32, ring)
+			     __field(u32, uniq)
 			     __field(u32, seqno)
 			     ),
 	    TP_fast_assign(
+			   struct intel_engine_cs *ring =
+						i915_gem_request_get_ring(req);
 			   __entry->dev = ring->dev->primary->index;
 			   __entry->ring = ring->id;
-			   __entry->seqno = seqno;
+			   __entry->uniq = req ? req->uniq : 0;
+			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   ),
-	    TP_printk("dev=%u, ring=%u, seqno=%u",
-		      __entry->dev, __entry->ring, __entry->seqno)
+	    TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->uniq,
+		      __entry->seqno)
 );
 DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
-	    TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
-	    TP_ARGS(ring, seqno)
+	    TP_PROTO(struct drm_i915_gem_request *req),
+	    TP_ARGS(req)
 );
-TRACE_EVENT(i915_gem_request_complete,
+TRACE_EVENT(i915_gem_request_notify,
 	    TP_PROTO(struct intel_engine_cs *ring),
 	    TP_ARGS(ring),
@@ -443,17 +450,23 @@ TRACE_EVENT(i915_gem_request_notify,
 );
 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
-	    TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
-	    TP_ARGS(ring, seqno)
+	    TP_PROTO(struct drm_i915_gem_request *req),
+	    TP_ARGS(req)
+);
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
+	    TP_PROTO(struct drm_i915_gem_request *req),
+	    TP_ARGS(req)
 );
 TRACE_EVENT(i915_gem_request_wait_begin,
-	    TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
-	    TP_ARGS(ring, seqno),
+	    TP_PROTO(struct drm_i915_gem_request *req),
+	    TP_ARGS(req),
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
 			     __field(u32, ring)
+			     __field(u32, uniq)
 			     __field(u32, seqno)
 			     __field(bool, blocking)
 			     ),
@@ -465,20 +478,24 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 	     * less desirable.
 	     */
 	    TP_fast_assign(
+			   struct intel_engine_cs *ring =
+						i915_gem_request_get_ring(req);
 			   __entry->dev = ring->dev->primary->index;
 			   __entry->ring = ring->id;
-			   __entry->seqno = seqno;
-			   __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
+			   __entry->uniq = req ? req->uniq : 0;
+			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->blocking =
+				     mutex_is_locked(&ring->dev->struct_mutex);
 			   ),
-	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
-		      __entry->dev, __entry->ring, __entry->seqno,
-		      __entry->blocking ? "yes (NB)" : "no")
+	    TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s",
+		      __entry->dev, __entry->ring, __entry->uniq,
+		      __entry->seqno, __entry->blocking ? "yes (NB)" : "no")
 );
 DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
-	    TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
-	    TP_ARGS(ring, seqno)
+	    TP_PROTO(struct drm_i915_gem_request *req),
+	    TP_ARGS(req)
 );
 DECLARE_EVENT_CLASS(i915_ring,
...
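The converted tracepoints pass struct drm_i915_gem_request pointers straight through, including NULL ones (note the req ? req->uniq : 0 guard above), so the accessors they rely on must tolerate NULL. A hedged sketch of what those helpers are assumed to look like (in the series they live in i915_drv.h):

	static inline uint32_t
	i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
	{
		/* Assumed behaviour: a NULL request reads as seqno 0. */
		return req ? req->seqno : 0;
	}

	static inline struct intel_engine_cs *
	i915_gem_request_get_ring(struct drm_i915_gem_request *req)
	{
		/* Assumed behaviour: no request, no ring. */
		return req ? req->ring : NULL;
	}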
@@ -314,6 +314,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 {
 	const struct bdb_lfp_backlight_data *backlight_data;
 	const struct bdb_lfp_backlight_data_entry *entry;
+	const struct bdb_lfp_backlight_control_data *bl_ctrl_data;
 	backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
 	if (!backlight_data)
@@ -326,6 +327,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 	}
 	entry = &backlight_data->data[panel_type];
+	bl_ctrl_data = &backlight_data->blc_ctl[panel_type];
 	dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
 	if (!dev_priv->vbt.backlight.present) {
@@ -337,12 +339,30 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 	dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
 	dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
 	dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
+	dev_priv->vbt.backlight.controller = 0;
+	if (bdb->version >= 191) {
+		dev_priv->vbt.backlight.present =
+			bl_ctrl_data->pin == BLC_CONTROL_PIN_DDI;
+		if (!dev_priv->vbt.backlight.present) {
+			DRM_DEBUG_KMS("BL control pin is not DDI (pin %u)\n",
+				      bl_ctrl_data->pin);
+			return;
+		}
+		if (bl_ctrl_data->controller == 1)
+			dev_priv->vbt.backlight.controller =
+				bl_ctrl_data->controller;
+	}
 	DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
 		      "active %s, min brightness %u, level %u\n",
 		      dev_priv->vbt.backlight.pwm_freq_hz,
 		      dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
 		      dev_priv->vbt.backlight.min_brightness,
 		      backlight_data->level[panel_type]);
+	DRM_DEBUG_KMS("VBT BL controller %u\n",
+		      dev_priv->vbt.backlight.controller);
 }
 /* Try to find sdvo panel data */
@@ -664,6 +684,50 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 	}
 }
+static void
+parse_psr(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+	struct bdb_psr *psr;
+	struct psr_table *psr_table;
+	psr = find_section(bdb, BDB_PSR);
+	if (!psr) {
+		DRM_DEBUG_KMS("No PSR BDB found.\n");
+		return;
+	}
+	psr_table = &psr->psr_table[panel_type];
+	dev_priv->vbt.psr.full_link = psr_table->full_link;
+	dev_priv->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
+	/* Allowed VBT values goes from 0 to 15 */
+	dev_priv->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
+		psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
+	switch (psr_table->lines_to_wait) {
+	case 0:
+		dev_priv->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
+		break;
+	case 1:
+		dev_priv->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
+		break;
+	case 2:
+		dev_priv->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
+		break;
+	case 3:
+		dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
+		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
+			      psr_table->lines_to_wait);
+		break;
+	}
+	dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
+	dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
+}
 static u8 *goto_next_sequence(u8 *data, int *size)
 {
 	u16 len;
@@ -1241,6 +1305,7 @@ intel_parse_bios(struct drm_device *dev)
 	parse_device_mapping(dev_priv, bdb);
 	parse_driver_features(dev_priv, bdb);
 	parse_edp(dev_priv, bdb);
+	parse_psr(dev_priv, bdb);
 	parse_mipi(dev_priv, bdb);
 	parse_ddi_ports(dev_priv, bdb);
...
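The nested conditional that sanitizes idle_frames in parse_psr() above is simply a clamp of the VBT field to its legal 0-15 range; an equivalent formulation using the kernel's existing clamp_t() helper would be:

	/* Same effect as the ternary chain in parse_psr() above. */
	dev_priv->vbt.psr.idle_frames =
		clamp_t(int, psr_table->idle_frames, 0, 15);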
@@ -80,7 +80,7 @@ struct vbios_data {
 #define BDB_EXT_MMIO_REGS	  6
 #define BDB_SWF_IO		  7
 #define BDB_SWF_MMIO		  8
-#define BDB_DOT_CLOCK_TABLE	  9
+#define BDB_PSR			  9
 #define BDB_MODE_REMOVAL_TABLE	 10
 #define BDB_CHILD_DEVICE_TABLE	 11
 #define BDB_DRIVER_FEATURES	 12
@@ -402,10 +402,21 @@ struct bdb_lfp_backlight_data_entry {
 	u8 obsolete3;
 } __packed;
+#define BLC_CONTROL_PIN_PMIC 0
+#define BLC_CONTROL_PIN_LPSS_PWM 1
+#define BLC_CONTROL_PIN_DDI 2
+#define BLC_CONTROL_PIN_CABC 3
+struct bdb_lfp_backlight_control_data {
+	u8 controller:4;
+	u8 pin:4;
+} __packed;
 struct bdb_lfp_backlight_data {
 	u8 entry_size;
 	struct bdb_lfp_backlight_data_entry data[16];
 	u8 level[16];
+	struct bdb_lfp_backlight_control_data blc_ctl[16];
 } __packed;
 struct aimdb_header {
@@ -556,6 +567,26 @@ struct bdb_edp {
 	u16 edp_t3_optimization;
 } __packed;
+struct psr_table {
+	/* Feature bits */
+	u8 full_link:1;
+	u8 require_aux_to_wakeup:1;
+	u8 feature_bits_rsvd:6;
+	/* Wait times */
+	u8 idle_frames:4;
+	u8 lines_to_wait:3;
+	u8 wait_times_rsvd:1;
+	/* TP wake up time in multiple of 100 */
+	u16 tp1_wakeup_time;
+	u16 tp2_tp3_wakeup_time;
+} __packed;
+struct bdb_psr {
+	struct psr_table psr_table[16];
+} __packed;
 void intel_setup_bios(struct drm_device *dev);
 int intel_parse_bios(struct drm_device *dev);
@@ -798,7 +829,8 @@ struct mipi_config {
 #define DUAL_LINK_PIXEL_ALT	2
 	u16 dual_link:2;
 	u16 lane_cnt:2;
-	u16 rsvd3:12;
+	u16 pixel_overlap:3;
+	u16 rsvd3:9;
 	u16 rsvd4;
...
@@ -128,15 +128,15 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
 };
 static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
-	{ 0x00000018, 0x000000a0 },
-	{ 0x00004014, 0x00000098 },
+	{ 0x00000018, 0x000000a2 },
+	{ 0x00004014, 0x0000009B },
 	{ 0x00006012, 0x00000088 },
-	{ 0x00008010, 0x00000080 },
-	{ 0x00000018, 0x00000098 },
+	{ 0x00008010, 0x00000087 },
+	{ 0x00000018, 0x0000009B },
 	{ 0x00004014, 0x00000088 },
-	{ 0x00006012, 0x00000080 },
+	{ 0x00006012, 0x00000087 },
 	{ 0x00000018, 0x00000088 },
-	{ 0x00004014, 0x00000080 },
+	{ 0x00004014, 0x00000087 },
 };
 static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
@@ -834,7 +834,12 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
 void intel_ddi_clock_get(struct intel_encoder *encoder,
 			 struct intel_crtc_config *pipe_config)
 {
+	struct drm_device *dev = encoder->base.dev;
+	if (INTEL_INFO(dev)->gen <= 8)
 		hsw_ddi_clock_get(encoder, pipe_config);
+	else
+		skl_ddi_clock_get(encoder, pipe_config);
 }
 static void
@@ -2029,7 +2034,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 	struct intel_hdmi *intel_hdmi;
 	u32 temp, flags = 0;
-	struct drm_device *dev = dev_priv->dev;
 	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 	if (temp & TRANS_DDI_PHSYNC)
@@ -2106,10 +2110,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
 	}
-	if (INTEL_INFO(dev)->gen <= 8)
-		hsw_ddi_clock_get(encoder, pipe_config);
-	else
-		skl_ddi_clock_get(encoder, pipe_config);
+	intel_ddi_clock_get(encoder, pipe_config);
 }
 static void intel_ddi_destroy(struct drm_encoder *encoder)
...
@@ -1558,7 +1558,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
 	vdd = edp_panel_vdd_on(intel_dp);
 	pps_unlock(intel_dp);
-	WARN(!vdd, "eDP port %c VDD already requested on\n",
+	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
 	     port_name(dp_to_dig_port(intel_dp)->port));
 }
@@ -1642,7 +1642,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 	if (!is_edp(intel_dp))
 		return;
-	WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
+	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
 	     port_name(dp_to_dig_port(intel_dp)->port));
 	intel_dp->want_panel_vdd = false;
@@ -2105,6 +2105,9 @@ static void intel_disable_dp(struct intel_encoder *encoder)
 	if (crtc->config.has_audio)
 		intel_audio_codec_disable(encoder);
+	if (HAS_PSR(dev) && !HAS_DDI(dev))
+		intel_psr_disable(intel_dp);
 	/* Make sure the panel is off before trying to change the mode. But also
 	 * ensure that we have vdd while we switch off the panel. */
 	intel_edp_panel_vdd_on(intel_dp);
@@ -2329,6 +2332,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	intel_edp_backlight_on(intel_dp);
+	intel_psr_enable(intel_dp);
 }
 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@@ -4306,7 +4310,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 	drm_dp_aux_unregister(&intel_dp->aux);
 	intel_dp_mst_encoder_cleanup(intel_dig_port);
-	drm_encoder_cleanup(encoder);
 	if (is_edp(intel_dp)) {
 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
 		/*
@@ -4322,6 +4325,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 		intel_dp->edp_notifier.notifier_call = NULL;
 		}
 	}
+	drm_encoder_cleanup(encoder);
 	kfree(intel_dig_port);
 }
@@ -4763,14 +4767,9 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 	}
 	/*
-	 * FIXME: This needs proper synchronization with psr state. But really
-	 * hard to tell without seeing the user of this function of this code.
-	 * Check locking and ordering once that lands.
+	 * FIXME: This needs proper synchronization with psr state for some
+	 * platforms that cannot have PSR and DRRS enabled at the same time.
 	 */
-	if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
-		DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
-		return;
-	}
 	encoder = intel_attached_encoder(&intel_connector->base);
 	intel_dp = enc_to_intel_dp(&encoder->base);
@@ -5086,7 +5085,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	intel_dp_aux_init(intel_dp, intel_connector);
 	/* init MST on ports that can support it */
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
 		if (port == PORT_B || port == PORT_C || port == PORT_D) {
 			intel_dp_mst_encoder_init(intel_dig_port,
 						  intel_connector->base.base.id);
...
@@ -244,8 +244,7 @@ typedef struct dpll {
 } intel_clock_t;
 struct intel_plane_state {
-	struct drm_crtc *crtc;
-	struct drm_framebuffer *fb;
+	struct drm_plane_state base;
 	struct drm_rect src;
 	struct drm_rect dst;
 	struct drm_rect clip;
@@ -406,8 +405,7 @@ struct intel_pipe_wm {
 };
 struct intel_mmio_flip {
-	u32 seqno;
-	struct intel_engine_cs *ring;
+	struct drm_i915_gem_request *req;
 	struct work_struct work;
 };
@@ -510,6 +508,10 @@ struct intel_plane {
 			     uint32_t src_w, uint32_t src_h);
 	void (*disable_plane)(struct drm_plane *plane,
 			      struct drm_crtc *crtc);
+	int (*check_plane)(struct drm_plane *plane,
+			   struct intel_plane_state *state);
+	void (*commit_plane)(struct drm_plane *plane,
+			     struct intel_plane_state *state);
 	int (*update_colorkey)(struct drm_plane *plane,
 			       struct drm_intel_sprite_colorkey *key);
 	void (*get_colorkey)(struct drm_plane *plane,
@@ -708,8 +710,7 @@ struct intel_unpin_work {
 #define INTEL_FLIP_COMPLETE 2
 	u32 flip_count;
 	u32 gtt_offset;
-	struct intel_engine_cs *flip_queued_ring;
-	u32 flip_queued_seqno;
+	struct drm_i915_gem_request *flip_queued_req;
 	int flip_queued_vblank;
 	int flip_ready_vblank;
 	bool enable_stall_check;
@@ -874,7 +875,6 @@ void intel_audio_codec_enable(struct intel_encoder *encoder);
 void intel_audio_codec_disable(struct intel_encoder *encoder);
 /* intel_display.c */
-const char *intel_output_name(int output);
 bool intel_has_pending_fb_unpin(struct drm_device *dev);
 int intel_pch_rawclk(struct drm_device *dev);
 void intel_mark_busy(struct drm_device *dev);
@@ -925,6 +925,10 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane);
 void intel_finish_page_flip(struct drm_device *dev, int pipe);
 void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
 void intel_check_page_flip(struct drm_device *dev, int pipe);
+int intel_prepare_plane_fb(struct drm_plane *plane,
+			   struct drm_framebuffer *fb);
+void intel_cleanup_plane_fb(struct drm_plane *plane,
+			    struct drm_framebuffer *fb);
 /* shared dpll functions */
 struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
@@ -1010,6 +1014,12 @@ void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
 void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
+int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+		       struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		       unsigned int crtc_w, unsigned int crtc_h,
+		       uint32_t src_x, uint32_t src_y,
+		       uint32_t src_w, uint32_t src_h);
+int intel_disable_plane(struct drm_plane *plane);
 /* intel_dp_mst.c */
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
@@ -1053,6 +1063,13 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
 }
 #endif
+/* intel_fbc.c */
+bool intel_fbc_enabled(struct drm_device *dev);
+void intel_fbc_update(struct drm_device *dev);
+void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_disable(struct drm_device *dev);
+void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
 /* intel_hdmi.c */
 void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1083,6 +1100,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 int intel_overlay_attrs(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+void intel_overlay_reset(struct drm_i915_private *dev_priv);
 /* intel_panel.c */
@@ -1115,7 +1133,6 @@ void intel_backlight_unregister(struct drm_device *dev);
 /* intel_psr.c */
-bool intel_psr_is_enabled(struct drm_device *dev);
 void intel_psr_enable(struct intel_dp *intel_dp);
 void intel_psr_disable(struct intel_dp *intel_dp);
 void intel_psr_invalidate(struct drm_device *dev,
@@ -1159,8 +1176,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
 				    bool enabled, bool scaled);
 void intel_init_pm(struct drm_device *dev);
 void intel_pm_setup(struct drm_device *dev);
-bool intel_fbc_enabled(struct drm_device *dev);
-void intel_update_fbc(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 void intel_gpu_ips_teardown(void);
 void intel_init_gt_powersave(struct drm_device *dev);
@@ -1191,7 +1206,6 @@ int intel_plane_set_property(struct drm_plane *plane,
 			     struct drm_property *prop,
 			     uint64_t val);
 int intel_plane_restore(struct drm_plane *plane);
-void intel_plane_disable(struct drm_plane *plane);
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
...
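The new check_plane/commit_plane pair splits a plane update into a validation step and a side-effect step, mirroring the atomic-modeset model this series prepares for. A hedged sketch of how a caller would be expected to sequence the two hooks (intel_plane_update here is an illustrative name, not from this patch):

	static int intel_plane_update(struct drm_plane *plane,
				      struct intel_plane_state *state)
	{
		struct intel_plane *intel_plane = to_intel_plane(plane);
		int ret;

		/* Validate first; no hardware state is touched on failure. */
		ret = intel_plane->check_plane(plane, state);
		if (ret)
			return ret;

		/* Commit cannot fail; all checks were done up front. */
		intel_plane->commit_plane(plane, state);
		return 0;
	}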
@@ -28,6 +28,11 @@
 #include <drm/drm_crtc.h>
 #include "intel_drv.h"
+/* Dual Link support */
+#define DSI_DUAL_LINK_NONE		0
+#define DSI_DUAL_LINK_FRONT_BACK	1
+#define DSI_DUAL_LINK_PIXEL_ALT		2
 struct intel_dsi_device {
 	unsigned int panel_id;
 	const char *name;
@@ -78,6 +83,9 @@ struct intel_dsi {
 	struct intel_connector *attached_connector;
+	/* bit mask of ports being driven */
+	u16 ports;
 	/* if true, use HS mode, otherwise LP */
 	bool hs;
@@ -101,6 +109,8 @@ struct intel_dsi {
 	u8 clock_stop;
 	u8 escape_clk_div;
+	u8 dual_link;
+	u8 pixel_overlap;
 	u32 port_bits;
 	u32 bw_timer;
 	u32 dphy_reg;
@@ -127,6 +137,22 @@ struct intel_dsi {
 	u16 panel_pwr_cycle_delay;
 };
+/* XXX: Transitional before dual port configuration */
+static inline enum port intel_dsi_pipe_to_port(enum pipe pipe)
+{
+	if (pipe == PIPE_A)
+		return PORT_A;
+	else if (pipe == PIPE_B)
+		return PORT_C;
+	WARN(1, "DSI on pipe %c, assuming port C\n", pipe_name(pipe));
+	return PORT_C;
+}
+#define for_each_dsi_port(__port, __ports_mask) \
+	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+		if ((__ports_mask) & (1 << (__port)))
 static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
 {
 	return container_of(encoder, struct intel_dsi, base.base);
...
@@ -36,77 +36,81 @@
 #define DPI_LP_MODE_EN	false
 #define DPI_HS_MODE_EN	true
-void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable,
+			enum port port);
 int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
-		     const u8 *data, int len);
+		     const u8 *data, int len, enum port port);
 int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
-			 const u8 *data, int len);
+			 const u8 *data, int len, enum port port);
 int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
-		    u8 *buf, int buflen);
+		    u8 *buf, int buflen, enum port port);
 int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
-			u8 *reqdata, int reqlen, u8 *buf, int buflen);
+			u8 *reqdata, int reqlen, u8 *buf, int buflen, enum port port);
 int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
 void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
 /* XXX: questionable write helpers */
 static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
-				     int channel, u8 dcs_cmd)
+				     int channel, u8 dcs_cmd, enum port port)
 {
-	return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
+	return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1, port);
 }
 static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
-				     int channel, u8 dcs_cmd, u8 param)
+				     int channel, u8 dcs_cmd, u8 param, enum port port)
 {
 	u8 buf[2] = { dcs_cmd, param };
-	return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
+	return dsi_vc_dcs_write(intel_dsi, channel, buf, 2, port);
 }
 static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
-					 int channel)
+					 int channel, enum port port)
 {
-	return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
+	return dsi_vc_generic_write(intel_dsi, channel, NULL, 0, port);
 }
 static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
-					 int channel, u8 param)
+					 int channel, u8 param, enum port port)
 {
-	return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
+	return dsi_vc_generic_write(intel_dsi, channel, &param, 1, port);
 }
 static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
-					 int channel, u8 param1, u8 param2)
+					 int channel, u8 param1, u8 param2, enum port port)
 {
 	u8 buf[2] = { param1, param2 };
-	return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
+	return dsi_vc_generic_write(intel_dsi, channel, buf, 2, port);
 }
 /* XXX: questionable read helpers */
 static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
-					int channel, u8 *buf, int buflen)
+					int channel, u8 *buf, int buflen, enum port port)
 {
-	return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
+	return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen,
+				   port);
 }
 static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
 					int channel, u8 param, u8 *buf,
-					int buflen)
+					int buflen, enum port port)
 {
-	return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
+	return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen,
+				   port);
 }
 static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
 					int channel, u8 param1, u8 param2,
-					u8 *buf, int buflen)
+					u8 *buf, int buflen, enum port port)
 {
 	u8 req[2] = { param1, param2 };
-	return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
+	return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen,
+				   port);
 }
...
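Taken together with the new ports bitmask and for_each_dsi_port() from intel_dsi.h, the per-port parameter lets callers fan a panel command out to every enabled link. An illustrative caller (not part of this patch) might look like:

	static void dsi_send_dcs_cmd_all_ports(struct intel_dsi *intel_dsi,
					       u8 dcs_cmd)
	{
		enum port port;

		/* Send the DCS command on virtual channel 0 of each driven port. */
		for_each_dsi_port(port, intel_dsi->ports)
			dsi_vc_dcs_write_0(intel_dsi, 0, dcs_cmd, port);
	}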
@@ -112,7 +112,7 @@ struct intel_ctx_submit_request {
 	int elsp_submitted;
 };
-void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+void intel_lrc_irq_handler(struct intel_engine_cs *ring);
 void intel_execlists_retire_requests(struct intel_engine_cs *ring);
 #endif /* _INTEL_LRC_H_ */
@@ -118,7 +118,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 }
 /**
- * intel_display_power_is_enabled - unlocked check for a power domain
+ * intel_display_power_is_enabled - check for a power domain
  * @dev_priv: i915 device instance
  * @domain: power domain to check
  *
@@ -633,7 +633,7 @@ static void check_power_well_state(struct drm_i915_private *dev_priv,
 		return;
 mismatch:
-	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
+	I915_STATE_WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
 	     power_well->name, power_well->always_on, enabled,
 	     power_well->count, i915.disable_power_well);
 }
...