Commit 9afafdbf authored by Dave Airlie

Merge tag 'drm-intel-next-2017-09-07' of git://anongit.freedesktop.org/git/drm-intel into drm-next

Getting started with v4.15 features:

- Cannonlake workarounds (Rodrigo, Oscar)
- Infoframe refactoring and fixes to enable infoframes for DP (Ville)
- VBT definition updates (Jani)
- Sparse warning fixes (Ville, Chris)
- Crtc state usage fixes and cleanups (Ville)
- DP vswing, pre-emph and buffer translation refactoring and fixes (Rodrigo)
- Prevent IPS from interfering with CRC capture (Ville, Marta)
- Enable Mesa to advertise ARB_timer_query (Nanley)
- Refactor GT number into intel_device_info (Lionel)
- Avoid eDP DP AUX CH timeouts harder (Manasi)
- CDCLK check improvements (Ville)
- Restore GPU clock boost on missed pageflip vblanks (Chris)
- Fence register reservation API for vGPU (Changbin)
- First batch of CCS fixes (Ville)
- Finally, numerous GEM fixes, cleanups and improvements (Chris)

* tag 'drm-intel-next-2017-09-07' of git://anongit.freedesktop.org/git/drm-intel: (100 commits)
  drm/i915: Update DRIVER_DATE to 20170907
  drm/i915/cnl: WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod)
  drm/i915: Lift has-pinned-pages assert to caller of ____i915_gem_object_get_pages
  drm/i915: Display WA #1133 WaFbcSkipSegments:cnl, glk
  drm/i915/cnl: Allow the reg_read ioctl to read the RCS TIMESTAMP register
  drm/i915: Move device_info.has_snoop into the static tables
  drm/i915: Disable MI_STORE_DATA_IMM for i915g/i915gm
  drm/i915: Re-enable GTT following a device reset
  drm/i915/cnp: Wa 1181: Fix Backlight issue
  drm/i915: Annotate user relocs with __user
  drm/i915: Constify load detect mode
  drm/i915/perf: Remove __user from u64 in drm_i915_perf_oa_config
  drm/i915: Silence sparse by using gfp_t
  drm/i915: io unmap functions want __iomem
  drm/i915: Add __rcu to radix tree slot pointer
  drm/i915: Wake up the device for the fbdev setup
  drm/i915: Add interface to reserve fence registers for vGPU
  drm/i915: Use correct path to trace include
  drm/i915: Fix the missing PPAT cache attributes on CNL
  drm/i915: Fix enum pipe vs. enum transcoder for the PCH transcoder
  ...
parents 29baa82a bb9d2d05
......@@ -150,5 +150,3 @@ endif
i915-y += intel_lpe_audio.o
obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)
......@@ -173,8 +173,8 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
......@@ -187,24 +187,19 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
int i;
struct list_head *pos, *q;
intel_runtime_pm_get(dev_priv);
/* Request fences from host */
mutex_lock(&dev_priv->drm.struct_mutex);
i = 0;
list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
reg = list_entry(pos, struct drm_i915_fence_reg, link);
if (reg->pin_count || reg->vma)
continue;
list_del(pos);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = i915_reserve_fence(dev_priv);
if (IS_ERR(reg))
goto out_free_fence;
vgpu->fence.regs[i] = reg;
if (++i == vgpu_fence_sz(vgpu))
break;
}
if (i != vgpu_fence_sz(vgpu))
goto out_free_fence;
_clear_vgpu_fence(vgpu);
......@@ -212,13 +207,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_put(dev_priv);
return 0;
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
/* Return fences to host on failure */
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
if (!reg)
continue;
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
......
......@@ -239,7 +239,8 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
!IS_KABYLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CNP;
DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
......
......@@ -80,8 +80,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20170818"
#define DRIVER_TIMESTAMP 1503088845
#define DRIVER_DATE "20170907"
#define DRIVER_TIMESTAMP 1504772900
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
......@@ -569,6 +569,24 @@ struct i915_hotplug {
(__i)++) \
for_each_if (plane_state)
#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_crtc && \
((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
(new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
(__i)++) \
for_each_if (crtc)
#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
(old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
(new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
(__i)++) \
for_each_if (plane)
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;
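The two new iterators above complete the set alongside the existing plane-state iterator, mirroring the drm core's for_each_new_crtc_in_state() family. A minimal usage sketch (hypothetical caller, not part of this diff), assuming an intel_atomic_state *state:

	struct intel_crtc *crtc;
	struct intel_crtc_state *new_crtc_state;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->base.active)
			DRM_DEBUG_KMS("pipe %c active in new state\n",
				      pipe_name(crtc->pipe));
	}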
......@@ -841,6 +859,7 @@ struct intel_device_info {
u8 gen;
u16 gen_mask;
enum intel_platform platform;
u8 gt; /* GT number, 0 if undefined */
u8 ring_mask; /* Rings supported by the HW */
u8 num_rings;
#define DEFINE_FLAG(name) u8 name:1
......@@ -1106,6 +1125,7 @@ struct intel_fbc {
} fb;
int cfb_size;
unsigned int gen9_wa_cfb_stride;
} params;
struct intel_fbc_work {
......@@ -1464,6 +1484,11 @@ struct i915_gem_mm {
struct llist_head free_list;
struct work_struct free_work;
/**
* Small stash of WC pages
*/
struct pagevec wc_stash;
/** Usable portion of the GTT for GEM */
dma_addr_t stolen_base; /* limited to low memory (32-bit) */
......@@ -1717,7 +1742,7 @@ struct intel_vbt_data {
int crt_ddc_pin;
int child_dev_num;
union child_device_config *child_dev;
struct child_device_config *child_dev;
struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
struct sdvo_device_mapping sdvo_mappings[2];
......@@ -2328,7 +2353,8 @@ struct drm_i915_private {
struct mutex dpll_lock;
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
......@@ -2861,9 +2887,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_G33(dev_priv) ((dev_priv)->info.platform == INTEL_G33)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.platform == INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
INTEL_DEVID(dev_priv) == 0x0152 || \
INTEL_DEVID(dev_priv) == 0x015a)
#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
(dev_priv)->info.gt == 1)
#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv) ((dev_priv)->info.platform == INTEL_HASWELL)
......@@ -2885,11 +2910,11 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xf) == 0xe)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
(dev_priv)->info.gt == 3)
#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
(dev_priv)->info.gt == 3)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
INTEL_DEVID(dev_priv) == 0x0A1E)
......@@ -2910,15 +2935,15 @@ intel_info(const struct drm_i915_private *dev_priv)
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
(dev_priv)->info.gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
(dev_priv)->info.gt == 3)
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
(dev_priv)->info.gt == 4)
#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
(dev_priv)->info.gt == 2)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
(dev_priv)->info.gt == 3)
#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
......@@ -3647,6 +3672,9 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv);
void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
......@@ -4332,11 +4360,4 @@ int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
static inline bool
intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
return __intel_engine_can_store_dword(INTEL_GEN(engine->i915),
engine->class);
}
#endif
......@@ -1013,17 +1013,20 @@ gtt_user_read(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
{
void *vaddr;
void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
vaddr = io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_to_user_inatomic(user_data,
(void __force *)vaddr + offset,
length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
vaddr = (void __force *)
io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_to_user(user_data, vaddr + offset, length);
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_to_user(user_data,
(void __force *)vaddr + offset,
length);
io_mapping_unmap(vaddr);
}
return unwritten;
......@@ -1189,18 +1192,18 @@ ggtt_write(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
{
void *vaddr;
void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
vaddr = io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
user_data, length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
vaddr = (void __force *)
io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_from_user(vaddr + offset, user_data, length);
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_from_user((void __force *)vaddr + offset,
user_data, length);
io_mapping_unmap(vaddr);
}
......@@ -2476,8 +2479,6 @@ static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
......@@ -2507,6 +2508,8 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return err;
if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
if (err)
goto unlock;
......@@ -2590,6 +2593,8 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
ret = ____i915_gem_object_get_pages(obj);
if (ret)
goto err_unlock;
......@@ -3257,11 +3262,11 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
struct i915_gem_context *ctx = lut->ctx;
struct i915_vma *vma;
GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
if (ctx->file_priv != fpriv)
continue;
vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
GEM_BUG_ON(vma->obj != obj);
/* We allow the process to have multiple handles to the same
......@@ -3375,26 +3380,14 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
return 0;
}
static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
{
return wait_for(intel_engine_is_idle(engine), timeout_ms);
}
static int wait_for_engines(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
if (wait_for(intel_engines_are_idle(i915), 50)) {
DRM_ERROR("Failed to idle engines, declaring wedged!\n");
i915_gem_set_wedged(i915);
return -EIO;
}
GEM_BUG_ON(intel_engine_get_seqno(engine) !=
intel_engine_last_submit(engine));
}
return 0;
}
......@@ -4426,6 +4419,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
llist_for_each_entry_safe(obj, on, freed, freed) {
GEM_BUG_ON(obj->bind_count);
GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
GEM_BUG_ON(!list_empty(&obj->lut_list));
if (obj->ops->release)
obj->ops->release(obj);
......@@ -4533,6 +4527,12 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
void i915_gem_sanitize(struct drm_i915_private *i915)
{
if (i915_terminally_wedged(&i915->gpu_error)) {
mutex_lock(&i915->drm.struct_mutex);
i915_gem_unset_wedged(i915);
mutex_unlock(&i915->drm.struct_mutex);
}
/*
* If we inherit context state from the BIOS or earlier occupants
* of the GPU, the GPU may be in an inconsistent state when we
......@@ -4572,7 +4572,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret)
if (ret && ret != -EIO)
goto err_unlock;
assert_kernel_context_is_current(dev_priv);
......@@ -4594,7 +4594,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* reset the GPU back to its idle, low power state.
*/
WARN_ON(dev_priv->gt.awake);
WARN_ON(!intel_engines_are_idle(dev_priv));
if (WARN_ON(!intel_engines_are_idle(dev_priv)))
i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
/*
* Neither the BIOS, ourselves or any other kernel
......@@ -4616,11 +4617,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machine in an unusable condition.
*/
i915_gem_sanitize(dev_priv);
goto out_rpm_put;
intel_runtime_pm_put(dev_priv);
return 0;
err_unlock:
mutex_unlock(&dev->struct_mutex);
out_rpm_put:
intel_runtime_pm_put(dev_priv);
return ret;
}
......
......@@ -268,6 +268,11 @@ static inline u64 gen8_noncanonical_addr(u64 address)
return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
return eb->engine->needs_cmd_parser && eb->batch_len;
}
static int eb_create(struct i915_execbuffer *eb)
{
if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
......@@ -1159,6 +1164,13 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
if (unlikely(!cache->rq)) {
int err;
/* If we need to copy for the cmdparser, we will stall anyway */
if (eb_use_cmdparser(eb))
return ERR_PTR(-EWOULDBLOCK);
if (!intel_engine_can_store_dword(eb->engine))
return ERR_PTR(-ENODEV);
err = __reloc_gpu_alloc(eb, vma, len);
if (unlikely(err))
return ERR_PTR(err);
......@@ -1183,9 +1195,7 @@ relocate_entry(struct i915_vma *vma,
if (!eb->reloc_cache.vaddr &&
(DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
!reservation_object_test_signaled_rcu(vma->resv, true)) &&
__intel_engine_can_store_dword(eb->reloc_cache.gen,
eb->engine->class)) {
!reservation_object_test_signaled_rcu(vma->resv, true))) {
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
u32 *batch;
......@@ -2291,7 +2301,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
if (eb.engine->needs_cmd_parser && eb.batch_len) {
if (eb_use_cmdparser(&eb)) {
struct i915_vma *vma;
vma = eb_parse(&eb, drm_is_current_master(file));
......
......@@ -359,6 +359,57 @@ i915_vma_get_fence(struct i915_vma *vma)
return fence_update(fence, set);
}
/**
* i915_reserve_fence - Reserve a fence for vGPU
* @dev_priv: i915 device private
*
* This function walks the fence regs looking for a free one, removes
* it from the fence_list, and reserves it for the vGPU to use.
*/
struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv)
{
struct drm_i915_fence_reg *fence;
int count;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* Keep at least one fence available for the display engine. */
count = 0;
list_for_each_entry(fence, &dev_priv->mm.fence_list, link)
count += !fence->pin_count;
if (count <= 1)
return ERR_PTR(-ENOSPC);
fence = fence_find(dev_priv);
if (IS_ERR(fence))
return fence;
if (fence->vma) {
/* Force-remove fence from VMA */
ret = fence_update(fence, NULL);
if (ret)
return ERR_PTR(ret);
}
list_del(&fence->link);
return fence;
}
/**
* i915_unreserve_fence - Reclaim a reserved fence
* @fence: the fence reg
*
* This function adds a reserved fence register from vGPU back to the fence_list.
*/
void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
{
lockdep_assert_held(&fence->i915->drm.struct_mutex);
list_add(&fence->link, &fence->i915->mm.fence_list);
}
/**
* i915_gem_revoke_fences - revoke fence state
* @dev_priv: i915 device private
......
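A minimal sketch of how a caller pairs the two new entry points (modeled on the GVT usage earlier in this diff; error handling abbreviated). Both functions assert struct_mutex, so the lock must be held:

	struct drm_i915_fence_reg *reg;

	mutex_lock(&dev_priv->drm.struct_mutex);
	reg = i915_reserve_fence(dev_priv);
	if (IS_ERR(reg)) {
		mutex_unlock(&dev_priv->drm.struct_mutex);
		return PTR_ERR(reg);
	}
	/* ... hand the fence register to the vGPU ... */
	i915_unreserve_fence(reg);
	mutex_unlock(&dev_priv->drm.struct_mutex);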
......@@ -356,39 +356,86 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
struct page *page;
struct pagevec *pvec = &vm->free_pages;
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
if (vm->free_pages.nr)
return vm->free_pages.pages[--vm->free_pages.nr];
if (likely(pvec->nr))
return pvec->pages[--pvec->nr];
if (!vm->pt_kmap_wc)
return alloc_page(gfp);
/* A placeholder for a specific mutex to guard the WC stash */
lockdep_assert_held(&vm->i915->drm.struct_mutex);
/* Look in our global stash of WC pages... */
pvec = &vm->i915->mm.wc_stash;
if (likely(pvec->nr))
return pvec->pages[--pvec->nr];
/* Otherwise batch allocate pages to amortize cost of set_pages_wc. */
do {
struct page *page;
page = alloc_page(gfp);
if (!page)
if (unlikely(!page))
break;
pvec->pages[pvec->nr++] = page;
} while (pagevec_space(pvec));
if (unlikely(!pvec->nr))
return NULL;
if (vm->pt_kmap_wc)
set_pages_array_wc(&page, 1);
set_pages_array_wc(pvec->pages, pvec->nr);
return page;
return pvec->pages[--pvec->nr];
}
static void vm_free_pages_release(struct i915_address_space *vm)
static void vm_free_pages_release(struct i915_address_space *vm,
bool immediate)
{
GEM_BUG_ON(!pagevec_count(&vm->free_pages));
struct pagevec *pvec = &vm->free_pages;
GEM_BUG_ON(!pagevec_count(pvec));
if (vm->pt_kmap_wc) {
struct pagevec *stash = &vm->i915->mm.wc_stash;
if (vm->pt_kmap_wc)
set_pages_array_wb(vm->free_pages.pages,
pagevec_count(&vm->free_pages));
/* When we use WC, first fill up the global stash; only once it is
* full do we immediately free the overflow.
*/
__pagevec_release(&vm->free_pages);
lockdep_assert_held(&vm->i915->drm.struct_mutex);
if (pagevec_space(stash)) {
do {
stash->pages[stash->nr++] =
pvec->pages[--pvec->nr];
if (!pvec->nr)
return;
} while (pagevec_space(stash));
/* As we have made some room in the VM's free_pages,
* we can wait for it to fill again. Unless we are
* inside i915_address_space_fini() and must
* immediately release the pages!
*/
if (!immediate)
return;
}
set_pages_array_wb(pvec->pages, pvec->nr);
}
__pagevec_release(pvec);
}
static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
if (!pagevec_add(&vm->free_pages, page))
vm_free_pages_release(vm);
vm_free_pages_release(vm, false);
}
static int __setup_page_dma(struct i915_address_space *vm,
......@@ -452,12 +499,31 @@ static void fill_page_dma_32(struct i915_address_space *vm,
static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
struct page *page;
dma_addr_t addr;
page = alloc_page(gfp | __GFP_ZERO);
if (unlikely(!page))
return -ENOMEM;
addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(vm->dma, addr))) {
__free_page(page);
return -ENOMEM;
}
vm->scratch_page.page = page;
vm->scratch_page.daddr = addr;
return 0;
}
static void cleanup_scratch_page(struct i915_address_space *vm)
{
cleanup_page_dma(vm, &vm->scratch_page);
struct i915_page_dma *p = &vm->scratch_page;
dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(p->page);
}
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
......@@ -1337,18 +1403,18 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1ULL << 48 :
1ULL << 32;
ret = gen8_init_scratch(&ppgtt->base);
if (ret) {
ppgtt->base.total = 0;
return ret;
}
/* There are only a few exceptions for gen >= 6: chv and bxt.
* And we are not sure about the latter so play safe for now.
*/
if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
ppgtt->base.pt_kmap_wc = true;
ret = gen8_init_scratch(&ppgtt->base);
if (ret) {
ppgtt->base.total = 0;
return ret;
}
if (use_4lvl(vm)) {
ret = setup_px(&ppgtt->base, &ppgtt->pml4);
if (ret)
......@@ -1872,7 +1938,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
static void i915_address_space_fini(struct i915_address_space *vm)
{
if (pagevec_count(&vm->free_pages))
vm_free_pages_release(vm);
vm_free_pages_release(vm, true);
i915_gem_timeline_fini(&vm->timeline);
drm_mm_takedown(&vm->mm);
......@@ -1885,12 +1951,12 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
else if (IS_GEN9_BC(dev_priv))
else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
else if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
......@@ -2598,6 +2664,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma, *vn;
struct pagevec *pvec;
ggtt->base.closed = true;
......@@ -2621,6 +2688,13 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
}
ggtt->base.cleanup(&ggtt->base);
pvec = &dev_priv->mm.wc_stash;
if (pvec->nr) {
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
arch_phys_wc_del(ggtt->mtrr);
......@@ -2716,13 +2790,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
/*
* On BXT writes larger than 64 bit to the GTT pagetable range will be
* dropped. For WC mappings in general we have 64 byte burst writes
* when the WC buffer is flushed, so we can't use it, but have to
* On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
* will be dropped. For WC mappings in general we have 64 byte burst
* writes when the WC buffer is flushed, so we can't use it, but have to
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
if (IS_GEN9_LP(dev_priv))
if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
......
......@@ -336,7 +336,7 @@ void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
__gen6_mask_pm_irq(dev_priv, mask);
}
void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
......@@ -347,7 +347,7 @@ void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
POSTING_READ(reg);
}
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
lockdep_assert_held(&dev_priv->irq_lock);
......@@ -357,7 +357,7 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
lockdep_assert_held(&dev_priv->irq_lock);
......@@ -405,7 +405,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
synchronize_irq(dev_priv->drm.irq);
/* Now that we will not be generating any more work, flush any
* outsanding tasks. As we are called on the RPS idle path,
* outstanding tasks. As we are called on the RPS idle path,
* we will reset the GPU to minimum frequencies, so the current
* state of the worker can be discarded.
*/
......
......@@ -2373,6 +2373,7 @@ enum i915_power_well_id {
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24)
#if 0
#define PRB0_TAIL _MMIO(0x2030)
......@@ -2491,6 +2492,7 @@ enum i915_power_well_id {
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 _MMIO(0x2090)
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
......@@ -2938,6 +2940,9 @@ enum i915_power_well_id {
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
#define GLK_SKIP_SEG_EN (1<<12)
#define GLK_SKIP_SEG_COUNT_MASK (3<<10)
#define GLK_SKIP_SEG_COUNT(x) ((x)<<10)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1)
......@@ -3806,6 +3811,12 @@ enum {
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
/*
* GEN10 clock gating regs
*/
#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
#define SARBUNIT_CLKGATE_DIS (1 << 5)
/*
* Display engine regs
*/
......@@ -6916,6 +6927,10 @@ enum {
#define GLK_CL1_PWR_DOWN (1 << 11)
#define GLK_CL0_PWR_DOWN (1 << 10)
#define CHICKEN_MISC_4 _MMIO(0x4208c)
#define FBC_STRIDE_OVERRIDE (1 << 13)
#define FBC_STRIDE_MASK 0x1FFF
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define HSW_FBCQ_DIS (1 << 22)
......@@ -7017,6 +7032,7 @@ enum {
/* GEN8 chicken */
#define HDC_CHICKEN0 _MMIO(0x7300)
#define CNL_HDC_CHICKEN0 _MMIO(0xE5F0)
#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
......@@ -7470,6 +7486,7 @@ enum {
#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
#define CNP_PWM_CGE_GATING_DISABLE (1<<13)
#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */
......@@ -8044,10 +8061,12 @@ enum {
#define FLOW_CONTROL_ENABLE (1<<15)
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
#define THROTTLE_12_5 (7<<2)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
#define DOP_CLOCK_GATING_DISABLE (1<<0)
#define PUSH_CONSTANT_DEREF_DISABLE (1<<8)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
......@@ -8059,6 +8078,7 @@ enum {
#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
#define CNL_FAST_ANISO_L1_BANKING_FIX (1<<4)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
......
......@@ -1031,5 +1031,5 @@ TRACE_EVENT(switch_mm,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
#include <trace/define_trace.h>
......@@ -107,7 +107,9 @@ intel_plane_destroy_state(struct drm_plane *plane,
drm_atomic_helper_plane_destroy_state(plane, state);
}
int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state)
{
struct drm_plane *plane = intel_state->base.plane;
......@@ -124,7 +126,7 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
* anything driver-specific we need to test in that case, so
* just return success.
*/
if (!intel_state->base.crtc && !plane->state->crtc)
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
/* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
......@@ -194,16 +196,21 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
else
crtc_state->active_planes &= ~BIT(intel_plane->id);
return intel_plane_atomic_calc_changes(&crtc_state->base, state);
return intel_plane_atomic_calc_changes(old_crtc_state,
&crtc_state->base,
old_plane_state,
state);
}
static int intel_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_plane_state *new_plane_state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_crtc_state *drm_crtc_state;
crtc = crtc ? crtc : plane->state->crtc;
struct drm_atomic_state *state = new_plane_state->state;
const struct drm_plane_state *old_plane_state =
drm_atomic_get_old_plane_state(state, plane);
struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
const struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
/*
* Both crtc and plane->crtc could be NULL if we're updating a
......@@ -214,29 +221,33 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
if (WARN_ON(!drm_crtc_state))
return -EINVAL;
old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
return intel_plane_atomic_check_with_state(to_intel_crtc_state(drm_crtc_state),
to_intel_plane_state(state));
return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state),
to_intel_crtc_state(new_crtc_state),
to_intel_plane_state(old_plane_state),
to_intel_plane_state(new_plane_state));
}
static void intel_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(old_state->state);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane->state);
struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
const struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state, intel_plane);
struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc;
if (new_plane_state->base.visible) {
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc));
if (intel_state->base.visible) {
trace_intel_update_plane(plane,
to_intel_crtc(crtc));
intel_plane->update_plane(intel_plane,
to_intel_crtc_state(crtc->state),
intel_state);
new_crtc_state, new_plane_state);
} else {
trace_intel_disable_plane(plane,
to_intel_crtc(crtc));
......
......@@ -143,7 +143,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
const struct intel_crtc_state *crtc_state,
int mode)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
......@@ -194,28 +194,28 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
}
static void intel_disable_crt(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
}
static void pch_disable_crt(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
}
static void pch_post_disable_crt(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_disable_crt(encoder, old_crtc_state, old_conn_state);
}
static void hsw_post_disable_crt(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
......@@ -228,8 +228,8 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
}
static void intel_enable_crt(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
}
......
......@@ -412,7 +412,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
else if (INTEL_INFO(dev_priv)->gen >= 9)
gen9_sseu_info_init(dev_priv);
info->has_snoop = !info->has_llc;
WARN_ON(info->has_snoop != !info->has_llc);
DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
......
......@@ -123,8 +123,8 @@ static int intel_dp_mst_atomic_check(struct drm_connector *connector,
}
static void intel_mst_disable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
......@@ -146,8 +146,8 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
......@@ -176,8 +176,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
}
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
......@@ -219,8 +219,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
......
......@@ -731,7 +731,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config);
const struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
......@@ -783,8 +783,8 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
*/
static void intel_dsi_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
......@@ -878,8 +878,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
* the pre_enable hook.
*/
static void intel_dsi_enable_nop(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
DRM_DEBUG_KMS("\n");
}
......@@ -889,8 +889,8 @@ static void intel_dsi_enable_nop(struct intel_encoder *encoder,
* the post_disable hook.
*/
static void intel_dsi_disable(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
......@@ -925,8 +925,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
......@@ -1370,7 +1370,7 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config)
const struct intel_crtc_state *pipe_config)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
......
......@@ -175,8 +175,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
}
static void intel_disable_dvo(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
......@@ -189,8 +189,8 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
}
static void intel_enable_dvo(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
......@@ -258,8 +258,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
}
static void intel_dvo_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
......
......@@ -1065,6 +1065,51 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
return 0;
}
static int cnl_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
/* WaDisableI2mCycleOnWRPort: cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
WA_SET_BIT(GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
/* WaForceContextSaveRestoreNonCoherent:cnl */
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
/* WaInPlaceDecompressionHang:cnl */
WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaPushConstantDereferenceHoldDisable:cnl */
WA_SET_BIT(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
/* FtrEnableFastAnisoL1BankingFix: cnl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
/* WaEnablePreemptionGranularityControlByUMD:cnl */
ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
if (ret)
return ret;
return 0;
}
static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
......@@ -1185,6 +1230,8 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
err = glk_init_workarounds(engine);
else if (IS_COFFEELAKE(dev_priv))
err = cfl_init_workarounds(engine);
else if (IS_CANNONLAKE(dev_priv))
err = cnl_init_workarounds(engine);
else
err = 0;
if (err)
......@@ -1335,6 +1382,21 @@ void intel_engines_mark_idle(struct drm_i915_private *i915)
}
}
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
case 2:
return false; /* uses physical not virtual addresses */
case 3:
/* maybe only uses physical not virtual addresses */
return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
case 6:
return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
default:
return true;
}
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif
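With the helper now out of line, call sites reduce to a simple guard. A hedged sketch (emit_mi_store_dword_imm() is a hypothetical helper, not from this diff):

	static int emit_store(struct drm_i915_gem_request *req,
			      u32 addr, u32 value)
	{
		/* gen2, i915g/i915gm and the gen6 BSD ring cannot do this */
		if (!intel_engine_can_store_dword(req->engine))
			return -ENODEV;

		return emit_mi_store_dword_imm(req, addr, value); /* hypothetical */
	}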
......@@ -291,6 +291,19 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
/* Display WA #0529: skl, kbl, bxt. */
if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
u32 val = I915_READ(CHICKEN_MISC_4);
val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
if (i915_gem_object_get_tiling(params->vma->obj) !=
I915_TILING_X)
val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
I915_WRITE(CHICKEN_MISC_4, val);
}
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
......@@ -881,6 +894,10 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->fb.stride = cache->fb.stride;
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
32 * fbc->threshold) * 8;
}
static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
......
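As a worked example of the stride override above (values hypothetical): with cache->plane.src_w = 3840 and fbc->threshold = 1, gen9_wa_cfb_stride = DIV_ROUND_UP(3840, 32 * 1) * 8 = 120 * 8 = 960, which fits comfortably in the 13-bit FBC_STRIDE_MASK field written to CHICKEN_MISC_4.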
......@@ -206,6 +206,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
mutex_lock(&dev->struct_mutex);
intel_runtime_pm_get(dev_priv);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
......@@ -269,6 +270,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
fb->width, fb->height, i915_ggtt_offset(vma));
ifbdev->vma = vma;
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
......@@ -276,6 +278,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_unpin:
intel_unpin_fb_vma(vma);
out_unlock:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return ret;
}
......
......@@ -1175,6 +1175,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
case 10:
return 0;
case 9:
wa_bb_fn[0] = gen9_init_indirectctx_bb;
wa_bb_fn[1] = gen9_init_perctx_bb;
......
......@@ -229,8 +229,8 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
......@@ -306,8 +306,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
* Sets the power state for the panel.
*/
static void intel_enable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
......@@ -324,8 +324,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
}
static void intel_disable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
......@@ -339,8 +339,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
}
static void gmch_disable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
......@@ -349,15 +349,15 @@ static void gmch_disable_lvds(struct intel_encoder *encoder,
}
static void pch_disable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
}
static void pch_post_disable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
}
......
......@@ -506,7 +506,7 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
bool enable)
{
struct drm_device *dev = &dev_priv->drm;
......@@ -533,10 +533,24 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
goto put_state;
}
if (HAS_IPS(dev_priv)) {
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
* user space can't make reliable use of the CRCs, so let's just
* completely disable it.
*/
pipe_config->ips_force_disable = enable;
if (pipe_config->ips_enabled == enable)
pipe_config->base.connectors_changed = true;
}
if (IS_HASWELL(dev_priv)) {
pipe_config->pch_pfit.force_thru = enable;
if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
pipe_config->pch_pfit.enabled != enable)
pipe_config->base.connectors_changed = true;
}
ret = drm_atomic_commit(state);
......@@ -570,8 +584,9 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_PF:
if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
if ((IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
hsw_pipe_A_crc_wa(dev_priv, true);
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
......@@ -606,7 +621,6 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source source)
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
......@@ -643,14 +657,6 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
goto out;
}
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
* user space can't make reliable use of the CRCs, so let's just
* completely disable it.
*/
hsw_disable_ips(crtc);
spin_lock_irq(&pipe_crc->lock);
kfree(pipe_crc->entries);
pipe_crc->entries = entries;
......@@ -691,10 +697,9 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
g4x_undo_pipe_scramble_reset(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, pipe);
else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
hsw_enable_ips(crtc);
else if ((IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
hsw_pipe_A_crc_wa(dev_priv, false);
}
ret = 0;
......@@ -914,7 +919,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
u32 val = 0; /* shut up gcc */
......@@ -935,16 +939,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
if (ret != 0)
goto out;
if (source) {
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
* user space can't make reliable use of the CRCs, so let's just
* completely disable it.
*/
hsw_disable_ips(intel_crtc);
}
I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
POSTING_READ(PIPE_CRC_CTL(crtc->index));
......@@ -953,10 +947,9 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
else if (IS_HASWELL(dev_priv) && crtc->index == PIPE_A)
hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
hsw_enable_ips(intel_crtc);
else if ((IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A)
hsw_pipe_A_crc_wa(dev_priv, false);
}
pipe_crc->skipped = 0;
......
......@@ -103,28 +103,26 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
POSTING_READ(ctl_reg);
}
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t val;
/* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
val = I915_READ(VLV_VSCSDP(pipe));
val = I915_READ(VLV_VSCSDP(crtc->pipe));
val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
I915_WRITE(VLV_VSCSDP(pipe), val);
I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
}
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct edp_vsc_psr psr_vsc;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
struct edp_vsc_psr psr_vsc;
/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
......@@ -145,7 +143,8 @@ static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct edp_vsc_psr psr_vsc;
......@@ -233,16 +232,15 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
I915_WRITE(aux_ctl_reg, aux_ctl);
}
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
static void vlv_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
/* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
I915_WRITE(VLV_PSRCTL(pipe),
I915_WRITE(VLV_PSRCTL(crtc->pipe),
VLV_EDP_PSR_MODE_SW_TIMER |
VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
VLV_EDP_PSR_ENABLE);
......@@ -485,16 +483,17 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
/**
* intel_psr_enable - Enable PSR
* @intel_dp: Intel DP
* @crtc_state: new CRTC state
*
* This function can only be called after the pipe is fully trained and enabled.
*/
void intel_psr_enable(struct intel_dp *intel_dp)
void intel_psr_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 chicken;
if (!HAS_PSR(dev_priv)) {
......@@ -520,11 +519,13 @@ void intel_psr_enable(struct intel_dp *intel_dp)
if (HAS_DDI(dev_priv)) {
if (dev_priv->psr.psr2_support) {
skl_psr_setup_su_vsc(intel_dp);
skl_psr_setup_su_vsc(intel_dp, crtc_state);
chicken = PSR2_VSC_ENABLE_PROG_HEADER;
if (dev_priv->psr.y_cord_support)
chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
I915_WRITE(EDP_PSR_DEBUG_CTL,
EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD |
......@@ -533,7 +534,8 @@ void intel_psr_enable(struct intel_dp *intel_dp)
EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
} else {
/* set up vsc header for psr1 */
hsw_psr_setup_vsc(intel_dp);
hsw_psr_setup_vsc(intel_dp, crtc_state);
/*
* Per Spec: Avoid continuous PSR exit by masking MEMUP
* and HPD. also mask LPSP to avoid dependency on other
......@@ -553,7 +555,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 9)
intel_psr_activate(intel_dp);
} else {
vlv_psr_setup_vsc(intel_dp);
vlv_psr_setup_vsc(intel_dp, crtc_state);
/* Enable PSR on the panel */
vlv_psr_enable_sink(intel_dp);
......@@ -564,7 +566,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
* but let it on inactive state. So we might do this prior
* to active transition, i.e. here.
*/
vlv_psr_enable_source(intel_dp);
vlv_psr_enable_source(intel_dp, crtc_state);
}
/*
......@@ -585,37 +587,38 @@ void intel_psr_enable(struct intel_dp *intel_dp)
mutex_unlock(&dev_priv->psr.lock);
}
static void vlv_psr_disable(struct intel_dp *intel_dp)
static void vlv_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
uint32_t val;
if (dev_priv->psr.active) {
/* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
if (intel_wait_for_register(dev_priv,
VLV_PSRSTAT(intel_crtc->pipe),
VLV_PSRSTAT(crtc->pipe),
VLV_EDP_PSR_IN_TRANS,
0,
1))
WARN(1, "PSR transition took longer than expected\n");
val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
val = I915_READ(VLV_PSRCTL(crtc->pipe));
val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
val &= ~VLV_EDP_PSR_ENABLE;
val &= ~VLV_EDP_PSR_MODE_MASK;
I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
dev_priv->psr.active = false;
} else {
WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
}
}
static void hsw_psr_disable(struct intel_dp *intel_dp)
static void hsw_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
......@@ -664,10 +667,12 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
/**
* intel_psr_disable - Disable PSR
* @intel_dp: Intel DP
* @old_crtc_state: old CRTC state
*
* This function needs to be called before disabling pipe.
*/
void intel_psr_disable(struct intel_dp *intel_dp)
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
......@@ -681,9 +686,9 @@ void intel_psr_disable(struct intel_dp *intel_dp)
/* Disable PSR on Source */
if (HAS_DDI(dev_priv))
hsw_psr_disable(intel_dp);
hsw_psr_disable(intel_dp, old_crtc_state);
else
vlv_psr_disable(intel_dp);
vlv_psr_disable(intel_dp, old_crtc_state);
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
......
......@@ -735,16 +735,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
void intel_engines_mark_idle(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
static inline bool
__intel_engine_can_store_dword(unsigned int gen, unsigned int class)
{
if (gen <= 2)
return false; /* uses physical not virtual addresses */
if (gen == 6 && class == VIDEO_DECODE_CLASS)
return false; /* b0rked */
return true;
}
bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
#endif /* _INTEL_RINGBUFFER_H_ */
......@@ -1251,7 +1251,7 @@ static const struct register_whitelist {
} whitelist[] = {
{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
.size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
.size = 8, .gen_bitmask = GEN_RANGE(4, 10) },
};
int i915_reg_read_ioctl(struct drm_device *dev,
......
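Extending the whitelist to gen 10 lets Cannonlake userspace sample the render ring timestamp the same way as earlier gens. A hedged userspace sketch (the 0x2358 offset, i.e. RENDER_RING_BASE + 0x358, is an assumption based on i915_reg.h):

	#include <errno.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	static int read_rcs_timestamp(int fd, uint64_t *ts)
	{
		struct drm_i915_reg_read rr = {
			.offset = 0x2358, /* RING_TIMESTAMP(RENDER_RING_BASE) */
		};

		if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr))
			return -errno;

		*ts = rr.val;
		return 0;
	}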
......@@ -1509,9 +1509,9 @@ struct drm_i915_perf_oa_config {
__u32 n_boolean_regs;
__u32 n_flex_regs;
__u64 __user mux_regs_ptr;
__u64 __user boolean_regs_ptr;
__u64 __user flex_regs_ptr;
__u64 mux_regs_ptr;
__u64 boolean_regs_ptr;
__u64 flex_regs_ptr;
};
#if defined(__cplusplus)
......
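Since this header is copied verbatim to userspace, the __user annotation was meaningless there; the register lists are plain __u64 values holding user pointers. A hedged userspace sketch (the register offset/value pair and the uuid are placeholders; drmIoctl returns the new config id on success):

	uint32_t mux_regs[] = { 0x9888, 0x0 }; /* hypothetical (offset, value) pair */
	struct drm_i915_perf_oa_config config = {
		.n_mux_regs = 1,
		.mux_regs_ptr = (uintptr_t)mux_regs,
	};

	memcpy(config.uuid, "01234567-0123-0123-0123-0123456789ab",
	       sizeof(config.uuid));
	int config_id = drmIoctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);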