Commit e1dee197 authored by Dave Airlie

Merge tag 'drm-intel-next-2015-04-23-fixed' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2015-04-23:
- dither support for ns2501 dvo (Thomas Richter)
- some polish for the gtt code and fixes to finally enable the cmd parser on hsw
- first pile of bxt stage 1 enabling (too many different people to list ...)
- more psr fixes from Rodrigo
- skl rotation support from Chandra
- more atomic work from Ander and Matt
- pile of cleanups and micro-ops for execlist from Chris
drm-intel-next-2015-04-10:
- cdclk handling cleanup and fixes from Ville
- more prep patches for olr removal from John Harrison
- gmbus pin naming rework from Jani (prep for bxt)
- remove ->new_config from Ander (more atomic conversion work)
- rps (boost) tuning and unification with byt/bsw from Chris
- cmd parser batch bool tuning from Chris
- gen8 dynamic pte allocation (Michel Thierry, based on work from Ben Widawsky)
- execlist tuning (not yet all of it) from Chris
- add drm_plane_from_index (Chandra)
- various small things all over

* tag 'drm-intel-next-2015-04-23-fixed' of git://anongit.freedesktop.org/drm-intel: (204 commits)
  drm/i915/gtt: Allocate va range only if vma is not bound
  drm/i915: Enable cmd parser to do secure batch promotion for aliasing ppgtt
  drm/i915: fix intel_prepare_ddi
  drm/i915: factor out ddi_get_encoder_port
  drm/i915/hdmi: check port in ibx_infoframe_enabled
  drm/i915/hdmi: fix vlv infoframe port check
  drm/i915: Silence compiler warning in dvo
  drm/i915: Update DRIVER_DATE to 20150423
  drm/i915: Enable dithering on NatSemi DVO2501 for Fujitsu S6010
  drm/i915: Move i915_get_ggtt_vma_pages into ggtt_bind_vma
  drm/i915: Don't try to outsmart gcc in i915_gem_gtt.c
  drm/i915: Unduplicate i915_ggtt_unbind/bind_vma
  drm/i915: Move ppgtt_bind/unbind around
  drm/i915: move i915_gem_restore_gtt_mappings around
  drm/i915: Fix up the vma aliasing ppgtt binding
  drm/i915: Remove misleading comment around bind_to_vm
  drm/i915: Don't use atomics for pg_dirty_rings
  drm/i915: Don't look at pg_dirty_rings for aliasing ppgtt
  drm/i915/skl: Support Y tiling in MMIO flips
  drm/i915: Fixup kerneldoc for struct intel_context
  ...

Conflicts:
	drivers/gpu/drm/i915/i915_drv.c
parents c0fe07aa 93a96c6f
@@ -4067,7 +4067,7 @@ int num_ioctls;</synopsis>
<title>DPIO</title>
!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
<table id="dpiox2">
-<title>Dual channel PHY (VLV/CHV)</title>
+<title>Dual channel PHY (VLV/CHV/BXT)</title>
<tgroup cols="8">
<colspec colname="c0" />
<colspec colname="c1" />
@@ -4118,7 +4118,7 @@ int num_ioctls;</synopsis>
</tgroup>
</table>
<table id="dpiox1">
-<title>Single channel PHY (CHV)</title>
+<title>Single channel PHY (CHV/BXT)</title>
<tgroup cols="4">
<colspec colname="c0" />
<colspec colname="c1" />
......
@@ -546,6 +546,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = {
INTEL_BDW_D_IDS(&gen8_stolen_funcs),
INTEL_CHV_IDS(&chv_stolen_funcs),
INTEL_SKL_IDS(&gen9_stolen_funcs),
+INTEL_BXT_IDS(&gen9_stolen_funcs),
};
static void __init intel_graphics_stolen(int num, int slot, int func)
......
@@ -1288,6 +1288,29 @@ unsigned int drm_plane_index(struct drm_plane *plane)
}
EXPORT_SYMBOL(drm_plane_index);
+/**
+ * drm_plane_from_index - find the registered plane at an index
+ * @dev: DRM device
+ * @idx: index of registered plane to find for
+ *
+ * Given a plane index, return the registered plane from DRM device's
+ * list of planes with matching index.
+ */
+struct drm_plane *
+drm_plane_from_index(struct drm_device *dev, int idx)
+{
+struct drm_plane *plane;
+unsigned int i = 0;
+list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+if (i == idx)
+return plane;
+i++;
+}
+return NULL;
+}
+EXPORT_SYMBOL(drm_plane_from_index);
/**
 * drm_plane_force_disable - Forcibly disable a plane
 * @plane: plane to disable
......
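The drm_plane_from_index() helper added above is the inverse of drm_plane_index(). A minimal, hypothetical caller (illustrative only; lookup_plane() is not part of this series) might look like:

    /* Round-trip a previously stored plane index back to a plane pointer.
     * Hypothetical helper for illustration; only drm_plane_from_index()
     * comes from this series. */
    static struct drm_plane *lookup_plane(struct drm_device *dev, int idx)
    {
            struct drm_plane *plane = drm_plane_from_index(dev, idx);

            if (!plane)
                    DRM_DEBUG_KMS("no plane registered at index %d\n", idx);
            return plane;
    }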
@@ -23,6 +23,9 @@
 * Authors:
 * Eric Anholt <eric@anholt.net>
 *
+ * Minor modifications (Dithering enable):
+ * Thomas Richter <thor@math.tu-berlin.de>
+ *
 */
#include "dvo.h"
@@ -59,6 +62,8 @@
# define VR01_DVO_BYPASS_ENABLE (1 << 1)
/** Enables the DVO clock */
# define VR01_DVO_ENABLE (1 << 0)
+/** Enable dithering for 18bpp panels. Not documented. */
+# define VR01_DITHER_ENABLE (1 << 4)
/*
 * LCD Interface Format
@@ -74,6 +79,8 @@
# define VR10_INTERFACE_2X18 (2 << 2)
/** Enables 2x24-bit LVDS output */
# define VR10_INTERFACE_2X24 (3 << 2)
+/** Mask that defines the depth of the pipeline */
+# define VR10_INTERFACE_DEPTH_MASK (3 << 2)
/*
 * VR20 LCD Horizontal Display Size
@@ -342,9 +349,15 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *adjusted_mode)
{
uint16_t vr40 = 0;
-uint16_t vr01;
+uint16_t vr01 = 0;
+uint16_t vr10;
+ivch_read(dvo, VR10, &vr10);
+/* Enable dithering for 18 bpp pipelines */
+vr10 &= VR10_INTERFACE_DEPTH_MASK;
+if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
+vr01 = VR01_DITHER_ENABLE;
-vr01 = 0;
vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
VR40_HORIZONTAL_INTERP_ENABLE);
@@ -353,7 +366,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
uint16_t x_ratio, y_ratio;
vr01 |= VR01_PANEL_FIT_ENABLE;
-vr40 |= VR40_CLOCK_GATING_ENABLE;
+vr40 |= VR40_CLOCK_GATING_ENABLE | VR40_ENHANCED_PANEL_FITTING;
x_ratio = (((mode->hdisplay - 1) << 16) /
(adjusted_mode->hdisplay - 1)) >> 2;
y_ratio = (((mode->vdisplay - 1) << 16) /
@@ -380,6 +393,8 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
ivch_read(dvo, VR01, &val);
DRM_DEBUG_KMS("VR01: 0x%04x\n", val);
+ivch_read(dvo, VR10, &val);
+DRM_DEBUG_KMS("VR10: 0x%04x\n", val);
ivch_read(dvo, VR30, &val);
DRM_DEBUG_KMS("VR30: 0x%04x\n", val);
ivch_read(dvo, VR40, &val);
......
@@ -869,6 +869,9 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
batch_len + batch_start_offset > src_obj->base.size)
return ERR_PTR(-E2BIG);
+if (WARN_ON(dest_obj->pages_pin_count == 0))
+return ERR_PTR(-ENODEV);
ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
if (ret) {
DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
@@ -882,13 +885,6 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
goto unpin_src;
}
-ret = i915_gem_object_get_pages(dest_obj);
-if (ret) {
-DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
-goto unmap_src;
-}
-i915_gem_object_pin_pages(dest_obj);
ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
if (ret) {
DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
@@ -898,7 +894,6 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
dst = vmap_batch(dest_obj, 0, batch_len);
if (!dst) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
-i915_gem_object_unpin_pages(dest_obj);
ret = -ENOMEM;
goto unmap_src;
}
@@ -1129,7 +1124,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
}
vunmap(batch_base);
-i915_gem_object_unpin_pages(shadow_batch_obj);
return ret;
}
......
@@ -564,65 +564,13 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
#undef SEP_COMMA
}
-/*
- * Determine various intel_device_info fields at runtime.
- *
- * Use it when either:
- * - it's judged too laborious to fill n static structures with the limit
- * when a simple if statement does the job,
- * - run-time checks (eg read fuse/strap registers) are needed.
- *
- * This function needs to be called:
- * - after the MMIO has been setup as we are reading registers,
- * - after the PCH has been detected,
- * - before the first usage of the fields it can tweak.
- */
-static void intel_device_info_runtime_init(struct drm_device *dev)
+static void cherryview_sseu_info_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_device_info *info;
-enum pipe pipe;
-info = (struct intel_device_info *)&dev_priv->info;
-if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
-for_each_pipe(dev_priv, pipe)
-info->num_sprites[pipe] = 2;
-else
-for_each_pipe(dev_priv, pipe)
-info->num_sprites[pipe] = 1;
-if (i915.disable_display) {
-DRM_INFO("Display disabled (module parameter)\n");
-info->num_pipes = 0;
-} else if (info->num_pipes > 0 &&
-(INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
-!IS_VALLEYVIEW(dev)) {
-u32 fuse_strap = I915_READ(FUSE_STRAP);
-u32 sfuse_strap = I915_READ(SFUSE_STRAP);
-/*
- * SFUSE_STRAP is supposed to have a bit signalling the display
- * is fused off. Unfortunately it seems that, at least in
- * certain cases, fused off display means that PCH display
- * reads don't land anywhere. In that case, we read 0s.
- *
- * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
- * should be set when taking over after the firmware.
- */
-if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
-sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
-(dev_priv->pch_type == PCH_CPT &&
-!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
-DRM_INFO("Display fused off, disabling\n");
-info->num_pipes = 0;
-}
-}
-/* Initialize slice/subslice/EU info */
-if (IS_CHERRYVIEW(dev)) {
u32 fuse, eu_dis;
+info = (struct intel_device_info *)&dev_priv->info;
fuse = I915_READ(CHV_FUSE_GT);
info->slice_total = 1;
@@ -657,21 +605,35 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info->has_slice_pg = 0;
info->has_subslice_pg = (info->subslice_total > 1);
info->has_eu_pg = (info->eu_per_subslice > 2);
-} else if (IS_SKYLAKE(dev)) {
-const int s_max = 3, ss_max = 4, eu_max = 8;
+}
+static void gen9_sseu_info_init(struct drm_device *dev)
+{
+struct drm_i915_private *dev_priv = dev->dev_private;
+struct intel_device_info *info;
+int s_max = 3, ss_max = 4, eu_max = 8;
int s, ss;
-u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+u32 fuse2, s_enable, ss_disable, eu_disable;
+u8 eu_mask = 0xff;
+/*
+ * BXT has a single slice. BXT also has at most 6 EU per subslice,
+ * and therefore only the lowest 6 bits of the 8-bit EU disable
+ * fields are valid.
+ */
+if (IS_BROXTON(dev)) {
+s_max = 1;
+eu_max = 6;
+eu_mask = 0x3f;
+}
+info = (struct intel_device_info *)&dev_priv->info;
fuse2 = I915_READ(GEN8_FUSE2);
s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
GEN8_F2_S_ENA_SHIFT;
ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
GEN9_F2_SS_DIS_SHIFT;
-eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
-eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
-eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);
info->slice_total = hweight32(s_enable);
/*
 * The subslice disable field is global, i.e. it applies
@@ -690,25 +652,26 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
/* skip disabled slice */
continue;
+eu_disable = I915_READ(GEN9_EU_DISABLE(s));
for (ss = 0; ss < ss_max; ss++) {
-u32 n_disabled;
+int eu_per_ss;
if (ss_disable & (0x1 << ss))
/* skip disabled subslice */
continue;
-n_disabled = hweight8(eu_disable[s] >>
-(ss * eu_max));
+eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
+eu_mask);
/*
 * Record which subslice(s) has(have) 7 EUs. we
 * can tune the hash used to spread work among
 * subslices if they are unbalanced.
 */
-if (eu_max - n_disabled == 7)
+if (eu_per_ss == 7)
info->subslice_7eu[s] |= 1 << ss;
-info->eu_total += eu_max - n_disabled;
+info->eu_total += eu_per_ss;
}
}
@@ -716,7 +679,8 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 * SKL is expected to always have a uniform distribution
 * of EU across subslices with the exception that any one
 * EU in any one subslice may be fused off for die
- * recovery.
+ * recovery. BXT is expected to be perfectly uniform in EU
+ * distribution.
 */
info->eu_per_subslice = info->subslice_total ?
DIV_ROUND_UP(info->eu_total,
@@ -724,12 +688,81 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
/*
 * SKL supports slice power gating on devices with more than
 * one slice, and supports EU power gating on devices with
- * more than one EU pair per subslice.
+ * more than one EU pair per subslice. BXT supports subslice
+ * power gating on devices with more than one subslice, and
+ * supports EU power gating on devices with more than one EU
+ * pair per subslice.
 */
-info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
-info->has_subslice_pg = 0;
-info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
+info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
+info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
+info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+/*
+ * Determine various intel_device_info fields at runtime.
+ *
+ * Use it when either:
+ * - it's judged too laborious to fill n static structures with the limit
+ * when a simple if statement does the job,
+ * - run-time checks (eg read fuse/strap registers) are needed.
+ *
+ * This function needs to be called:
+ * - after the MMIO has been setup as we are reading registers,
+ * - after the PCH has been detected,
+ * - before the first usage of the fields it can tweak.
+ */
+static void intel_device_info_runtime_init(struct drm_device *dev)
+{
+struct drm_i915_private *dev_priv = dev->dev_private;
+struct intel_device_info *info;
+enum pipe pipe;
+info = (struct intel_device_info *)&dev_priv->info;
+if (IS_BROXTON(dev)) {
+info->num_sprites[PIPE_A] = 3;
+info->num_sprites[PIPE_B] = 3;
+info->num_sprites[PIPE_C] = 2;
+} else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
+for_each_pipe(dev_priv, pipe)
+info->num_sprites[pipe] = 2;
+else
+for_each_pipe(dev_priv, pipe)
+info->num_sprites[pipe] = 1;
+if (i915.disable_display) {
+DRM_INFO("Display disabled (module parameter)\n");
+info->num_pipes = 0;
+} else if (info->num_pipes > 0 &&
+(INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
+!IS_VALLEYVIEW(dev)) {
+u32 fuse_strap = I915_READ(FUSE_STRAP);
+u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+/*
+ * SFUSE_STRAP is supposed to have a bit signalling the display
+ * is fused off. Unfortunately it seems that, at least in
+ * certain cases, fused off display means that PCH display
+ * reads don't land anywhere. In that case, we read 0s.
+ *
+ * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
+ * should be set when taking over after the firmware.
+ */
+if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+(dev_priv->pch_type == PCH_CPT &&
+!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+DRM_INFO("Display fused off, disabling\n");
+info->num_pipes = 0;
+}
+}
}
+/* Initialize slice/subslice/EU info */
+if (IS_CHERRYVIEW(dev))
+cherryview_sseu_info_init(dev);
+else if (INTEL_INFO(dev)->gen >= 9)
+gen9_sseu_info_init(dev);
DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
@@ -1006,8 +1039,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
-if (dev_priv->slab)
-kmem_cache_destroy(dev_priv->slab);
+if (dev_priv->requests)
+kmem_cache_destroy(dev_priv->requests);
+if (dev_priv->vmas)
+kmem_cache_destroy(dev_priv->vmas);
+if (dev_priv->objects)
+kmem_cache_destroy(dev_priv->objects);
kfree(dev_priv);
return ret;
}
@@ -1072,7 +1109,6 @@ int i915_driver_unload(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
-i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
@@ -1091,8 +1127,12 @@ int i915_driver_unload(struct drm_device *dev)
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
-if (dev_priv->slab)
-kmem_cache_destroy(dev_priv->slab);
+if (dev_priv->requests)
+kmem_cache_destroy(dev_priv->requests);
+if (dev_priv->vmas)
+kmem_cache_destroy(dev_priv->vmas);
+if (dev_priv->objects)
+kmem_cache_destroy(dev_priv->objects);
pci_dev_put(dev_priv->bridge_dev);
kfree(dev_priv);
......
@@ -381,6 +381,18 @@ static const struct intel_device_info intel_skylake_gt3_info = {
IVB_CURSOR_OFFSETS,
};
+static const struct intel_device_info intel_broxton_info = {
+.is_preliminary = 1,
+.gen = 9,
+.need_gfx_hws = 1, .has_hotplug = 1,
+.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+.num_pipes = 3,
+.has_ddi = 1,
+.has_fbc = 1,
+GEN_DEFAULT_PIPEOFFSETS,
+IVB_CURSOR_OFFSETS,
+};
/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
@@ -420,7 +432,8 @@ static const struct intel_device_info intel_skylake_gt3_info = {
INTEL_CHV_IDS(&intel_cherryview_info), \
INTEL_SKL_GT1_IDS(&intel_skylake_info), \
INTEL_SKL_GT2_IDS(&intel_skylake_info), \
-INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info) \
+INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \
+INTEL_BXT_IDS(&intel_broxton_info)
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -996,6 +1009,38 @@ static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
return 0;
}
+static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
+{
+struct drm_device *dev = dev_priv->dev;
+/* TODO: when DC5 support is added disable DC5 here. */
+broxton_ddi_phy_uninit(dev);
+broxton_uninit_cdclk(dev);
+bxt_enable_dc9(dev_priv);
+return 0;
+}
+static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
+{
+struct drm_device *dev = dev_priv->dev;
+/* TODO: when CSR FW support is added make sure the FW is loaded */
+bxt_disable_dc9(dev_priv);
+/*
+ * TODO: when DC5 support is added enable DC5 here if the CSR FW
+ * is available.
+ */
+broxton_init_cdclk(dev);
+broxton_ddi_phy_init(dev);
+intel_prepare_ddi(dev);
+return 0;
+}
/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1195,7 +1240,21 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
u32 val;
int err;
+val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
+/* Wait for a previous force-off to settle */
+if (force_on && !IS_CHERRYVIEW(dev_priv->dev)) {
+/* WARN_ON only for the Valleyview */
+WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
+err = wait_for(!COND, 20);
+if (err) {
+DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
+I915_READ(VLV_GTLC_SURVIVABILITY_REG));
+return err;
+}
+}
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
@@ -1454,6 +1513,9 @@ static int intel_runtime_resume(struct device *device)
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
+if (IS_BROXTON(dev))
+ret = bxt_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
@@ -1486,7 +1548,9 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
int ret;
-if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+if (IS_BROXTON(dev))
+ret = bxt_suspend_complete(dev_priv);
+else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ret = hsw_suspend_complete(dev_priv);
else if (IS_VALLEYVIEW(dev))
ret = vlv_suspend_complete(dev_priv);
......
@@ -23,6 +23,7 @@
 */
#include "i915_drv.h"
+#include "i915_gem_batch_pool.h"
/**
 * DOC: batch pool
@@ -46,8 +47,12 @@
void i915_gem_batch_pool_init(struct drm_device *dev,
struct i915_gem_batch_pool *pool)
{
+int n;
pool->dev = dev;
-INIT_LIST_HEAD(&pool->cache_list);
+for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+INIT_LIST_HEAD(&pool->cache_list[n]);
}
/**
@@ -58,33 +63,35 @@ void i915_gem_batch_pool_init(struct drm_device *dev,
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
+int n;
WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
-while (!list_empty(&pool->cache_list)) {
+for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+while (!list_empty(&pool->cache_list[n])) {
struct drm_i915_gem_object *obj =
-list_first_entry(&pool->cache_list,
+list_first_entry(&pool->cache_list[n],
struct drm_i915_gem_object,
-batch_pool_list);
+batch_pool_link);
-WARN_ON(obj->active);
-list_del_init(&obj->batch_pool_list);
+list_del(&obj->batch_pool_link);
drm_gem_object_unreference(&obj->base);
}
+}
}
/**
- * i915_gem_batch_pool_get() - select a buffer from the pool
+ * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
- * Finds or allocates a batch buffer in the pool with at least the requested
- * size. The caller is responsible for any domain, active/inactive, or
- * purgeability management for the returned buffer.
+ * Returns an inactive buffer from @pool with at least @size bytes,
+ * with the pages pinned. The caller must i915_gem_object_unpin_pages()
+ * on the returned object.
 *
 * Note: Callers must hold the struct_mutex
 *
- * Return: the selected batch buffer object
+ * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
@@ -92,46 +99,53 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
{
struct drm_i915_gem_object *obj = NULL;
struct drm_i915_gem_object *tmp, *next;
+struct list_head *list;
+int n;
WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
-list_for_each_entry_safe(tmp, next,
-&pool->cache_list, batch_pool_list) {
+/* Compute a power-of-two bucket, but throw everything greater than
+ * 16KiB into the same bucket: i.e. the the buckets hold objects of
+ * (1 page, 2 pages, 4 pages, 8+ pages).
+ */
+n = fls(size >> PAGE_SHIFT) - 1;
+if (n >= ARRAY_SIZE(pool->cache_list))
+n = ARRAY_SIZE(pool->cache_list) - 1;
+list = &pool->cache_list[n];
+list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
+/* The batches are strictly LRU ordered */
if (tmp->active)
-continue;
+break;
/* While we're looping, do some clean up */
if (tmp->madv == __I915_MADV_PURGED) {
-list_del(&tmp->batch_pool_list);
+list_del(&tmp->batch_pool_link);
drm_gem_object_unreference(&tmp->base);
continue;
}
-/*
- * Select a buffer that is at least as big as needed
- * but not 'too much' bigger. A better way to do this
- * might be to bucket the pool objects based on size.
- */
-if (tmp->base.size >= size &&
-tmp->base.size <= (2 * size)) {
+if (tmp->base.size >= size) {
obj = tmp;
break;
}
}
-if (!obj) {
+if (obj == NULL) {
+int ret;
obj = i915_gem_alloc_object(pool->dev, size);
-if (!obj)
+if (obj == NULL)
return ERR_PTR(-ENOMEM);
-list_add_tail(&obj->batch_pool_list, &pool->cache_list);
-}
-else
-/* Keep list in LRU order */
-list_move_tail(&obj->batch_pool_list, &pool->cache_list);
-obj->madv = I915_MADV_WILLNEED;
+ret = i915_gem_object_get_pages(obj);
+if (ret)
+return ERR_PTR(ret);
+obj->madv = I915_MADV_DONTNEED;
+}
+list_move_tail(&obj->batch_pool_link, list);
+i915_gem_object_pin_pages(obj);
return obj;
}
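The bucket selection above maps a page-aligned size to one of the four cache_list entries via fls(). A standalone sketch of that computation (kernel context assumed for fls() and PAGE_SHIFT; batch_pool_bucket() itself is illustrative, not part of this series):

    /* 1 page -> bucket 0, 2-3 pages -> 1, 4-7 pages -> 2, 8+ pages -> 3.
     * Assumes size is page-aligned and non-zero, as callers pass
     * PAGE_ALIGN(batch_len). */
    static unsigned int batch_pool_bucket(size_t size)
    {
            unsigned int n = fls(size >> PAGE_SHIFT) - 1; /* floor(log2(pages)) */

            if (n >= 4) /* ARRAY_SIZE(pool->cache_list) in the header below */
                    n = 3;
            return n;
    }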
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef I915_GEM_BATCH_POOL_H
#define I915_GEM_BATCH_POOL_H
#include "i915_drv.h"
struct i915_gem_batch_pool {
struct drm_device *dev;
struct list_head cache_list[4];
};
/* i915_gem_batch_pool.c */
void i915_gem_batch_pool_init(struct drm_device *dev,
struct i915_gem_batch_pool *pool);
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
struct drm_i915_gem_object*
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
#endif /* I915_GEM_BATCH_POOL_H */
@@ -157,6 +157,8 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
struct drm_i915_gem_object *obj;
int ret;
+obj = i915_gem_object_create_stolen(dev, size);
+if (obj == NULL)
obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
@@ -573,20 +575,12 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
struct intel_context *from,
struct intel_context *to)
{
-struct drm_i915_private *dev_priv = ring->dev->dev_private;
if (to->remap_slice)
return false;
-if (to->ppgtt) {
-if (from == to && !test_bit(ring->id,
-&to->ppgtt->pd_dirty_rings))
+if (to->ppgtt && from == to &&
+!(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
return true;
-} else if (dev_priv->mm.aliasing_ppgtt) {
-if (from == to && !test_bit(ring->id,
-&dev_priv->mm.aliasing_ppgtt->pd_dirty_rings))
-return true;
-}
return false;
}
@@ -636,7 +630,6 @@ static int do_switch(struct intel_engine_cs *ring,
struct intel_context *from = ring->last_context;
u32 hw_flags = 0;
bool uninitialized = false;
-struct i915_vma *vma;
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -673,7 +666,7 @@ static int do_switch(struct intel_engine_cs *ring,
goto unpin_out;
/* Doing a PD load always reloads the page dirs */
-clear_bit(ring->id, &to->ppgtt->pd_dirty_rings);
+to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
}
if (ring != &dev_priv->ring[RCS]) {
@@ -694,16 +687,6 @@ static int do_switch(struct intel_engine_cs *ring,
if (ret)
goto unpin_out;
-vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
-if (!(vma->bound & GLOBAL_BIND)) {
-ret = i915_vma_bind(vma,
-to->legacy_hw_ctx.rcs_state->cache_level,
-GLOBAL_BIND);
-/* This shouldn't ever fail. */
-if (WARN_ONCE(ret, "GGTT context bind failed!"))
-goto unpin_out;
-}
if (!to->legacy_hw_ctx.initialized) {
hw_flags |= MI_RESTORE_INHIBIT;
/* NB: If we inhibit the restore, the context is not allowed to
@@ -711,8 +694,10 @@ static int do_switch(struct intel_engine_cs *ring,
 * space. This means we must enforce that a page table load
 * occur when this occurs. */
} else if (to->ppgtt &&
-test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
hw_flags |= MI_FORCE_RESTORE;
+to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+}
/* We should never emit switch_mm more than once */
WARN_ON(needs_pd_load_pre(ring, to) &&
......
@@ -37,7 +37,6 @@
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
-#define __EXEC_OBJECT_PURGEABLE (1<<27)
#define BATCH_OFFSET_BIAS (256*1024)
@@ -224,12 +223,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
vma->pin_count--;
-if (entry->flags & __EXEC_OBJECT_PURGEABLE)
-obj->madv = I915_MADV_DONTNEED;
-entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
-__EXEC_OBJECT_HAS_PIN |
-__EXEC_OBJECT_PURGEABLE);
+entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
static void eb_destroy(struct eb_vmas *eb)
@@ -406,10 +400,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 * pipe_control writes because the gpu doesn't properly redirect them
 * through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
-reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-!(target_vma->bound & GLOBAL_BIND))) {
+reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
-GLOBAL_BIND);
+PIN_GLOBAL);
if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
return ret;
}
@@ -591,12 +584,13 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
uint64_t flags;
int ret;
-flags = 0;
+flags = PIN_USER;
+if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+flags |= PIN_GLOBAL;
if (!drm_mm_node_allocated(&vma->node)) {
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
flags |= PIN_GLOBAL | PIN_MAPPABLE;
-if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
-flags |= PIN_GLOBAL;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
}
@@ -606,7 +600,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
only_mappable_for_reloc(entry->flags))
ret = i915_gem_object_pin(obj, vma->vm,
entry->alignment,
-flags & ~(PIN_GLOBAL | PIN_MAPPABLE));
+flags & ~PIN_MAPPABLE);
if (ret)
return ret;
@@ -1142,12 +1136,11 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
u32 batch_len,
bool is_master)
{
-struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
struct drm_i915_gem_object *shadow_batch_obj;
struct i915_vma *vma;
int ret;
-shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
+shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
PAGE_ALIGN(batch_len));
if (IS_ERR(shadow_batch_obj))
return shadow_batch_obj;
@@ -1165,11 +1158,13 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
if (ret)
goto err;
+i915_gem_object_unpin_pages(shadow_batch_obj);
memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
vma->exec_entry = shadow_exec_entry;
-vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
drm_gem_object_reference(&shadow_batch_obj->base);
list_add_tail(&vma->exec_list, &eb->vmas);
@@ -1178,6 +1173,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
return shadow_batch_obj;
err:
+i915_gem_object_unpin_pages(shadow_batch_obj);
if (ret == -EACCES) /* unhandled chained batch */
return batch_obj;
else
@@ -1251,12 +1247,8 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
if (ret)
goto error;
-if (ctx->ppgtt)
-WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
"%s didn't clear reload\n", ring->name);
-else if (dev_priv->mm.aliasing_ppgtt)
-WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings &
-(1<<ring->id), "%s didn't clear reload\n", ring->name);
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
@@ -1566,12 +1558,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 * dispatch_execbuffer implementations. We specifically
 * don't want that set when the command parser is
 * enabled.
- *
- * FIXME: with aliasing ppgtt, buffers that should only
- * be in ggtt still end up in the aliasing ppgtt. remove
- * this check when that is fixed.
 */
-if (USES_FULL_PPGTT(dev))
+if (USES_PPGTT(dev))
dispatch_flags |= I915_DISPATCH_SECURE;
exec_start = 0;
@@ -1601,7 +1589,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
} else
exec_start += i915_gem_obj_offset(batch_obj, vm);
-ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
+ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args,
&eb->vmas, batch_obj, exec_start,
dispatch_flags);
......
@@ -158,7 +158,6 @@ struct i915_vma {
/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0)
#define LOCAL_BIND (1<<1)
-#define PTE_READ_ONLY (1<<2)
unsigned int bound : 4;
/**
@@ -196,36 +195,30 @@ struct i915_vma {
 * bits with absolutely no headroom. So use 4 bits. */
unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-/** Unmap an object from an address space. This usually consists of
- * setting the valid PTE entries to a reserved scratch page. */
-void (*unbind_vma)(struct i915_vma *vma);
-/* Map an object into an address space with the given cache flags. */
-void (*bind_vma)(struct i915_vma *vma,
-enum i915_cache_level cache_level,
-u32 flags);
};
-struct i915_page_table_entry {
+struct i915_page_table {
struct page *page;
dma_addr_t daddr;
unsigned long *used_ptes;
};
-struct i915_page_directory_entry {
+struct i915_page_directory {
struct page *page; /* NULL for GEN6-GEN7 */
union {
uint32_t pd_offset;
dma_addr_t daddr;
};
-struct i915_page_table_entry *page_table[I915_PDES]; /* PDEs */
+unsigned long *used_pdes;
+struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};
-struct i915_page_directory_pointer_entry {
+struct i915_page_directory_pointer {
/* struct page *page; */
-struct i915_page_directory_entry *page_directory[GEN8_LEGACY_PDPES];
+DECLARE_BITMAP(used_pdpes, GEN8_LEGACY_PDPES);
+struct i915_page_directory *page_directory[GEN8_LEGACY_PDPES];
};
struct i915_address_space {
@@ -267,6 +260,8 @@ struct i915_address_space {
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 flags); /* Create a valid PTE */
+/* flags for pte_encode */
+#define PTE_READ_ONLY (1<<0)
int (*allocate_va_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length);
@@ -279,6 +274,13 @@ struct i915_address_space {
uint64_t start,
enum i915_cache_level cache_level, u32 flags);
void (*cleanup)(struct i915_address_space *vm);
+/** Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page. */
+void (*unbind_vma)(struct i915_vma *vma);
+/* Map an object into an address space with the given cache flags. */
+int (*bind_vma)(struct i915_vma *vma,
+enum i915_cache_level cache_level,
+u32 flags);
};
/* The Graphics Translation Table is the way in which GEN hardware translates a
@@ -314,14 +316,13 @@ struct i915_hw_ppgtt {
struct kref ref;
struct drm_mm_node node;
unsigned long pd_dirty_rings;
-unsigned num_pd_entries;
-unsigned num_pd_pages; /* gen8+ */
union {
-struct i915_page_directory_pointer_entry pdp;
-struct i915_page_directory_entry pd;
+struct i915_page_directory_pointer pdp;
+struct i915_page_directory pd;
};
-struct i915_page_table_entry *scratch_pt;
+struct i915_page_table *scratch_pt;
+struct i915_page_directory *scratch_pd;
struct drm_i915_file_private *file_priv;
@@ -349,6 +350,11 @@ struct i915_hw_ppgtt {
temp = min_t(unsigned, temp, length), \
start += temp, length -= temp)
+#define gen6_for_all_pdes(pt, ppgtt, iter) \
+for (iter = 0; \
+pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
+iter++)
static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
const uint32_t mask = NUM_PTE(pde_shift) - 1;
@@ -397,6 +403,63 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
return i915_pde_index(addr, GEN6_PDE_SHIFT);
}
+/* Equivalent to the gen6 version, For each pde iterates over every pde
+ * between from start until start + length. On gen8+ it simply iterates
+ * over every page directory entry in a page directory.
+ */
+#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \
+for (iter = gen8_pde_index(start); \
+pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
+iter++, \
+temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \
+temp = min(temp, length), \
+start += temp, length -= temp)
+#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
+for (iter = gen8_pdpe_index(start); \
+pd = (pdp)->page_directory[iter], length > 0 && iter < GEN8_LEGACY_PDPES; \
+iter++, \
+temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \
+temp = min(temp, length), \
+start += temp, length -= temp)
+/* Clamp length to the next page_directory boundary */
+static inline uint64_t gen8_clamp_pd(uint64_t start, uint64_t length)
+{
+uint64_t next_pd = ALIGN(start + 1, 1 << GEN8_PDPE_SHIFT);
+if (next_pd > (start + length))
+return length;
+return next_pd - start;
+}
+static inline uint32_t gen8_pte_index(uint64_t address)
+{
+return i915_pte_index(address, GEN8_PDE_SHIFT);
+}
+static inline uint32_t gen8_pde_index(uint64_t address)
+{
+return i915_pde_index(address, GEN8_PDE_SHIFT);
+}
+static inline uint32_t gen8_pdpe_index(uint64_t address)
+{
+return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
+}
+static inline uint32_t gen8_pml4e_index(uint64_t address)
+{
+WARN_ON(1); /* For 64B */
+return 0;
+}
+static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
+{
+return i915_pte_count(address, length, GEN8_PDE_SHIFT);
+}
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);
......
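The gen8_*_index() helpers added above slice a GPU virtual address into page-directory-pointer, page-directory and page-table indices. A short worked sketch (assuming the usual gen8 legacy layout of 4KiB pages, GEN8_PDE_SHIFT == 21, GEN8_PDPE_SHIFT == 30 and 512 entries per table; decode_gen8_gpu_va() is illustrative only, not part of this series):

    static void decode_gen8_gpu_va(uint64_t addr)
    {
            uint32_t pdpe = (addr >> 30) & 0x3;   /* which page directory       */
            uint32_t pde  = (addr >> 21) & 0x1ff; /* which page table within it */
            uint32_t pte  = (addr >> 12) & 0x1ff; /* which 4KiB page in that PT */

            /* e.g. addr = 0x12345678 -> pdpe 0, pde 0x91, pte 0x145, offset 0x678 */
            DRM_DEBUG_DRIVER("pdpe=%u pde=%u pte=%u offset=0x%llx\n",
                             pdpe, pde, pte,
                             (unsigned long long)(addr & 0xfff));
    }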
@@ -184,9 +184,12 @@ static int num_vma_bound(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int count = 0;
-list_for_each_entry(vma, &obj->vma_list, vma_link)
+list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (drm_mm_node_allocated(&vma->node))
count++;
+if (vma->pin_count)
+count++;
+}
return count;
}
@@ -210,8 +213,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-if (!i915_gem_obj_is_pinned(obj) &&
-obj->pages_pin_count == num_vma_bound(obj))
+if (obj->pages_pin_count == num_vma_bound(obj))
count += obj->base.size >> PAGE_SHIFT;
}
......
@@ -209,7 +209,7 @@ static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
dev_priv->fbc.threshold = ret;
-if (HAS_PCH_SPLIT(dev))
+if (INTEL_INFO(dev_priv)->gen >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
......
@@ -336,7 +336,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
-if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
+if (obj->pin_display || obj->framebuffer_references) {
ret = -EBUSY;
goto err;
}
......
@@ -251,6 +251,7 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
return;
err_printf(m, "%s command stream:\n", ring_str(ring_idx));
+err_printf(m, " START: 0x%08x\n", ring->start);
err_printf(m, " HEAD: 0x%08x\n", ring->head);
err_printf(m, " TAIL: 0x%08x\n", ring->tail);
err_printf(m, " CTL: 0x%08x\n", ring->ctl);
@@ -883,6 +884,7 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
ering->seqno = ring->get_seqno(ring, false);
ering->acthd = intel_ring_get_active_head(ring);
+ering->start = I915_READ_START(ring);
ering->head = I915_READ_HEAD(ring);
ering->tail = I915_READ_TAIL(ring);
ering->ctl = I915_READ_CTL(ring);
......
@@ -220,7 +220,7 @@ DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc,
DECLARE_EVENT_CLASS(i915_page_table_entry_update,
TP_PROTO(struct i915_address_space *vm, u32 pde,
-struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+struct i915_page_table *pt, u32 first, u32 count, u32 bits),
TP_ARGS(vm, pde, pt, first, count, bits),
TP_STRUCT__entry(
@@ -250,7 +250,7 @@ DECLARE_EVENT_CLASS(i915_page_table_entry_update,
DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
TP_PROTO(struct i915_address_space *vm, u32 pde,
-struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+struct i915_page_table *pt, u32 first, u32 count, u32 bits),
TP_ARGS(vm, pde, pt, first, count, bits)
);
@@ -504,7 +504,6 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
-__field(u32, uniq)
__field(u32, seqno)
),
@@ -513,13 +512,11 @@ DECLARE_EVENT_CLASS(i915_gem_request,
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
-__entry->uniq = req ? req->uniq : 0;
__entry->seqno = i915_gem_request_get_seqno(req);
),
-TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u",
-__entry->dev, __entry->ring, __entry->uniq,
-__entry->seqno)
+TP_printk("dev=%u, ring=%u, seqno=%u",
+__entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
@@ -564,7 +561,6 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
-__field(u32, uniq)
__field(u32, seqno)
__field(bool, blocking)
),
@@ -580,14 +576,13 @@ TRACE_EVENT(i915_gem_request_wait_begin,
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
-__entry->uniq = req ? req->uniq : 0;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->blocking =
mutex_is_locked(&ring->dev->struct_mutex);
),
-TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s",
-__entry->dev, __entry->ring, __entry->uniq,
+TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
+__entry->dev, __entry->ring,
__entry->seqno, __entry->blocking ? "yes (NB)" : "no")
);
@@ -596,33 +591,6 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
TP_ARGS(req)
);
-DECLARE_EVENT_CLASS(i915_ring,
-TP_PROTO(struct intel_engine_cs *ring),
-TP_ARGS(ring),
-TP_STRUCT__entry(
-__field(u32, dev)
-__field(u32, ring)
-),
-TP_fast_assign(
-__entry->dev = ring->dev->primary->index;
-__entry->ring = ring->id;
-),
-TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
-);
-DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
-TP_PROTO(struct intel_engine_cs *ring),
-TP_ARGS(ring)
-);
-DEFINE_EVENT(i915_ring, i915_ring_wait_end,
-TP_PROTO(struct intel_engine_cs *ring),
-TP_ARGS(ring)
-);
TRACE_EVENT(i915_flip_request,
TP_PROTO(int plane, struct drm_i915_gem_object *obj),
......
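With the uniq field dropped, the request tracepoints carry only dev/ring/seqno. A hedged sketch of how one of the generated events is emitted from a call site (the call site itself is illustrative, not quoted from this commit):

/* Sketch: DEFINE_EVENT(i915_gem_request, i915_gem_request_add, ...) above
 * generates a trace_i915_gem_request_add() helper; a caller hands it the
 * request, and the rendered line now reads "dev=%u, ring=%u, seqno=%u". */
static void example_request_committed(struct drm_i915_gem_request *req)
{
	trace_i915_gem_request_add(req);
}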
@@ -162,6 +162,30 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
             (1 << drm_plane_index(plane));
     }

+    if (state->fb && intel_rotation_90_or_270(state->rotation)) {
+        if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
+            state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
+            DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
+            return -EINVAL;
+        }
+
+        /*
+         * 90/270 is not allowed with RGB64 16:16:16:16,
+         * RGB 16-bit 5:6:5, and Indexed 8-bit.
+         * TBD: Add RGB64 case once its added in supported format list.
+         */
+        switch (state->fb->pixel_format) {
+        case DRM_FORMAT_C8:
+        case DRM_FORMAT_RGB565:
+            DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
+                          drm_get_format_name(state->fb->pixel_format));
+            return -EINVAL;
+        default:
+            break;
+        }
+    }
+
     return intel_plane->check_plane(plane, intel_state);
 }
...
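The new check relies on intel_rotation_90_or_270() to decide whether the stricter tiling and format rules apply. A minimal sketch of that helper under the rotation-bitmask convention of this era (assumed shape, not part of this diff):

/* Sketch: plane rotation is a bitmask, so detecting 90/270 is a mask test. */
static inline bool intel_rotation_90_or_270(unsigned int rotation)
{
	return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
}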
@@ -28,7 +28,6 @@
 #include <drm/drmP.h>
 #include <drm/drm_edid.h>
-#include "intel_drv.h"
 #include "i915_drv.h"

 /**
@@ -485,7 +484,8 @@ static int i915_audio_component_get_cdclk_freq(struct device *dev)
         return -ENODEV;

     intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-    ret = intel_ddi_get_cdclk_freq(dev_priv);
+    ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
+
     intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);

     return ret;
...
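The audio component now reads cdclk through the per-platform display vtable instead of the DDI-specific helper. A rough sketch of how such a hook is populated at driver init; the platform readout names below are assumptions based on the usual intel_display.c naming, not quotes from this commit:

/* Sketch only: one cdclk readout is chosen per platform during init, so
 * callers such as the audio component stay platform-agnostic. */
if (IS_VALLEYVIEW(dev))
	dev_priv->display.get_display_clock_speed =
		valleyview_get_display_clock_speed;
else if (HAS_DDI(dev))
	dev_priv->display.get_display_clock_speed =
		haswell_get_display_clock_speed;	/* assumed name */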
@@ -438,7 +438,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
     if (block_size >= sizeof(*general)) {
         int bus_pin = general->crt_ddc_gmbus_pin;
         DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
-        if (intel_gmbus_is_port_valid(bus_pin))
+        if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
             dev_priv->vbt.crt_ddc_pin = bus_pin;
     } else {
         DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
@@ -447,6 +447,12 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
     }
 }

+static union child_device_config *
+child_device_ptr(struct bdb_general_definitions *p_defs, int i)
+{
+    return (void *) &p_defs->devices[i * p_defs->child_dev_size];
+}
+
 static void
 parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
                           struct bdb_header *bdb)
@@ -476,10 +482,10 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
     block_size = get_blocksize(p_defs);
     /* get the number of child device */
     child_device_num = (block_size - sizeof(*p_defs)) /
-                            sizeof(*p_child);
+                            p_defs->child_dev_size;
     count = 0;
     for (i = 0; i < child_device_num; i++) {
-        p_child = &(p_defs->devices[i]);
+        p_child = child_device_ptr(p_defs, i);
         if (!p_child->old.device_type) {
             /* skip the device block if device type is invalid */
             continue;
@@ -1067,25 +1073,19 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
         DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
         return;
     }
-    /* judge whether the size of child device meets the requirements.
-     * If the child device size obtained from general definition block
-     * is different with sizeof(struct child_device_config), skip the
-     * parsing of sdvo device info
-     */
-    if (p_defs->child_dev_size != sizeof(*p_child)) {
-        /* different child dev size . Ignore it */
-        DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+    if (p_defs->child_dev_size < sizeof(*p_child)) {
+        DRM_ERROR("General definiton block child device size is too small.\n");
         return;
     }
     /* get the block size of general definitions */
     block_size = get_blocksize(p_defs);
     /* get the number of child device */
     child_device_num = (block_size - sizeof(*p_defs)) /
-                            sizeof(*p_child);
+                            p_defs->child_dev_size;
     count = 0;
     /* get the number of child device that is present */
     for (i = 0; i < child_device_num; i++) {
-        p_child = &(p_defs->devices[i]);
+        p_child = child_device_ptr(p_defs, i);
         if (!p_child->common.device_type) {
             /* skip the device block if device type is invalid */
             continue;
@@ -1105,7 +1105,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
     dev_priv->vbt.child_dev_num = count;
     count = 0;
     for (i = 0; i < child_device_num; i++) {
-        p_child = &(p_defs->devices[i]);
+        p_child = child_device_ptr(p_defs, i);
         if (!p_child->common.device_type) {
             /* skip the device block if device type is invalid */
             continue;
@@ -1133,7 +1133,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
     struct drm_device *dev = dev_priv->dev;
     enum port port;

-    dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
+    dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;

     /* Default to having backlight */
     dev_priv->vbt.backlight.present = true;
...
@@ -277,9 +277,9 @@ struct bdb_general_definitions {
      * And the device num is related with the size of general definition
      * block. It is obtained by using the following formula:
      * number = (block_size - sizeof(bdb_general_definitions))/
-     *          sizeof(child_device_config);
+     *          defs->child_dev_size;
      */
-    union child_device_config devices[0];
+    uint8_t devices[0];
 } __packed;

 /* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
...
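Taken together with the child_device_ptr() helper added in intel_bios.c above, the comment change describes plain byte indexing with the stride the VBT itself reports. A worked sketch of the resulting walk (variable names follow the diff; the loop body is illustrative only):

/* Sketch: count and index child devices using the VBT-provided stride
 * rather than sizeof(union child_device_config). */
int i, child_device_num;

child_device_num = (block_size - sizeof(*p_defs)) / p_defs->child_dev_size;
for (i = 0; i < child_device_num; i++) {
	union child_device_config *p_child = child_device_ptr(p_defs, i);

	if (!p_child->common.device_type)
		continue;	/* empty slot, skip it */
	/* ... consume the device entry ... */
}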
@@ -747,7 +747,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
         goto out;

     /* Try to probe digital port for output in DVI-I -> VGA mode. */
-    i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
+    i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPB);
     ret = intel_crt_ddc_get_modes(connector, i2c);

 out:
...
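The GMBUS_PORT_* to GMBUS_PIN_* switch here and in intel_bios.c comes from the gmbus pin naming rework; validity is now checked per device, since BXT uses a different pin map. A hedged sketch of the reworked check (the pin-table helpers below are hypothetical stand-ins for illustration; only the function signature comes from the diff):

/* Sketch: pin validity depends on the platform's pin table, hence the extra
 * dev_priv argument. get_gmbus_pin_table()/get_gmbus_pin_count() are
 * hypothetical helpers used only to show the shape of the check. */
bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
			      unsigned int pin)
{
	const struct gmbus_pin *pins = get_gmbus_pin_table(dev_priv);
	size_t size = get_gmbus_pin_count(dev_priv);

	return pin < size && pins[pin].reg != 0;
}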
@@ -150,14 +150,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
     enum port port = intel_dig_port->port;
     int ret;
     uint32_t temp;
-    struct intel_connector *found = NULL, *intel_connector;
+    struct intel_connector *found = NULL, *connector;
     int slots;
     struct drm_crtc *crtc = encoder->base.crtc;
     struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

-    for_each_intel_connector(dev, intel_connector) {
-        if (intel_connector->new_encoder == encoder) {
-            found = intel_connector;
+    for_each_intel_connector(dev, connector) {
+        if (connector->base.state->best_encoder == &encoder->base) {
+            found = connector;
             break;
         }
     }
@@ -173,6 +173,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
     if (intel_dp->active_mst_links == 0) {
         enum port port = intel_ddi_get_encoder_port(encoder);

+        /* FIXME: add support for SKL */
+        if (INTEL_INFO(dev)->gen < 9)
             I915_WRITE(PORT_CLK_SEL(port),
                        intel_crtc->config->ddi_pll_sel);
...
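The connector lookup now consults the committed atomic state (connector->base.state->best_encoder) rather than the transitional new_encoder pointer. For reference, a sketch of the iterator used in that loop, assuming its usual definition in i915_drv.h (not part of this commit):

/* Sketch: walk every intel_connector hanging off the device's
 * mode_config connector list. */
#define for_each_intel_connector(dev, intel_connector)		\
	list_for_each_entry(intel_connector,			\
			    &(dev)->mode_config.connector_list,	\
			    base.head)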
@@ -243,6 +243,8 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
     /* Remove stale busy bits due to the old buffer. */
     dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
     mutex_unlock(&dev_priv->fb_tracking.lock);
+
+    intel_psr_single_frame_update(dev);
 }

 /**
...