Commit 85bd5ac3 authored by Dave Airlie

Merge tag 'drm-intel-next-2016-03-30' of git://anongit.freedesktop.org/drm-intel into drm-next

- VBT code refactor for a clean split between parsing & using of firmware
  information (Jani)
- untangle the PLL computation code, splitting up the monster
  i9xx_crtc_compute_clocks (Ander)
- DSI support for BXT (Jani, Shashank Sharma and others)
- color manager (i.e. de-gamma, color conversion matrix & gamma support) from
  Lionel Landwerlin
- Vulkan HSW support in the command parser (Jordan Justen)
- large-scale renaming of intel_engine_cs variables/parameters to avoid the epic
  ring vs. engine confusion introduced in gen8 (Tvrtko Ursulin)
- a few atomic patches from Maarten & Matt; the big one is two-stage wm
  programming on ILK-BDW
- refactor driver load and add infrastructure to inject load failures for
  testing, from Imre (the counting idea is sketched below)
- various small things all over
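The load-failure injection mentioned above is driven by the i915.inject_load_failure module parameter added in this series: driver load counts its failure check points and forces an error once the Nth one is reached. A standalone sketch of that counting scheme, with invented helper names rather than the exact i915 ones:

    /* Illustration of counter-based failure injection; all names here
     * are invented for the sketch, not the real i915 helpers. */
    #include <stdio.h>

    static unsigned int inject_load_failure = 2; /* "module param": fail at the 2nd checkpoint */
    static unsigned int failure_count;           /* checkpoints hit so far */

    static int should_fail(void)
    {
            if (inject_load_failure == 0)
                    return 0;                    /* injection disabled */
            return ++failure_count == inject_load_failure;
    }

    static int load_step(const char *name)
    {
            if (should_fail()) {
                    printf("injected failure at %s\n", name);
                    return -1;
            }
            printf("%s ok\n", name);
            return 0;
    }

    int main(void)
    {
            if (load_step("init mmio") || load_step("init gem") ||
                load_step("init display"))
                    return 1;                    /* unwind as the real driver would */
            return 0;
    }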

* tag 'drm-intel-next-2016-03-30' of git://anongit.freedesktop.org/drm-intel: (179 commits)
  drm/i915: Update DRIVER_DATE to 20160330
  drm/i915: Call intel_dp_mst_resume() before resuming displays
  drm/i915: Fix races on fbdev
  drm/i915: remove unused dev_priv->render_reclock_avail
  drm/i915: move sdvo mappings to vbt data
  drm/i915: move edp low vswing config to vbt data
  drm/i915: use a substruct in vbt data for edp
  drm/i915: replace for_each_engine()
  drm/i915: introduce for_each_engine_id()
  drm/i915/bxt: Fix DSI HW state readout
  drm/i915: Remove vblank wait from hsw_enable_ips, v2.
  drm/i915: Tidy aliasing_gtt_bind_vma()
  drm/i915: Split PNV version of crtc_compute_clock()
  drm/i915: Split g4x_crtc_compute_clock()
  drm/i915: Split i8xx_crtc_compute_clock()
  drm/i915: Split CHV and VLV specific crtc_compute_clock() hooks
  drm/i915: Merge ironlake_compute_clocks() and ironlake_crtc_compute_clock()
  drm/i915: Move fp divisor calculation into ironlake_compute_dpll()
  drm/i915: Pass crtc_state->dpll directly to ->find_dpll()
  drm/i915: Simplify ironlake_crtc_compute_clock() CPU eDP case
  ...
parents e7c8e544 68d4aee9
@@ -2153,7 +2153,11 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
 	<td valign="top" >Connector</td>
-	<td valign="top" >TBD</td>
+	<td valign="top" >When this property is set to Limited 16:235
+	and CTM is set, the hardware will be programmed with the
+	result of the multiplication of CTM by the limited range
+	matrix to ensure the pixels normally in the range 0..1.0 are
+	remapped to the range 16/255..235/255.</td>
 	</tr>
 	<tr>
 	<td valign="top" >“audio”</td>
@@ -3334,7 +3338,7 @@ int num_ioctls;</synopsis>
 	<title>Video BIOS Table (VBT)</title>
 !Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
 !Idrivers/gpu/drm/i915/intel_bios.c
-!Idrivers/gpu/drm/i915/intel_bios.h
+!Idrivers/gpu/drm/i915/intel_vbt_defs.h
       </sect2>
     </sect1>
......
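To make the "Limited 16:235" description above concrete: composing a CTM with the limited-range matrix scales each channel by 219/255 and offsets it by 16/255, so black lands at 16/255 ≈ 0.063 and white at 235/255 ≈ 0.922. A standalone sketch of the arithmetic (illustrative user-space C, not driver code; real hardware typically applies the 16/255 offset via dedicated post-offset registers rather than the 3x3 matrix itself):

    #include <stdio.h>

    #define RANGE_SCALE  (219.0 / 255.0)   /* (235 - 16) / 255 */
    #define RANGE_OFFSET ( 16.0 / 255.0)

    /* Apply a row-major 3x3 CTM, then remap full range to limited range. */
    static void apply_limited_range(const double ctm[9], const double in[3],
                                    double out[3])
    {
            for (int i = 0; i < 3; i++) {
                    double v = ctm[3 * i + 0] * in[0] +
                               ctm[3 * i + 1] * in[1] +
                               ctm[3 * i + 2] * in[2];
                    out[i] = v * RANGE_SCALE + RANGE_OFFSET;
            }
    }

    int main(void)
    {
            const double identity[9] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
            const double white[3] = { 1.0, 1.0, 1.0 }, black[3] = { 0, 0, 0 };
            double out[3];

            apply_limited_range(identity, black, out);
            printf("black -> %.4f (expect 16/255 = 0.0627)\n", out[0]);
            apply_limited_range(identity, white, out);
            printf("white -> %.4f (expect 235/255 = 0.9216)\n", out[0]);
            return 0;
    }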
@@ -56,3 +56,9 @@ config DRM_I915_USERPTR
 	  selected to enabled full userptr support.
 
 	  If in doubt, say "Y".
+
+menu "drm/i915 Debugging"
+depends on DRM_I915
+depends on EXPERT
+source drivers/gpu/drm/i915/Kconfig.debug
+endmenu

drivers/gpu/drm/i915/Kconfig.debug (new file):
+config DRM_I915_DEBUG
+	bool "Enable additional driver debugging"
+	depends on DRM_I915
+	default n
+	help
+	  Choose this option to turn on extra driver debugging that may affect
+	  performance but will catch some internal issues.
+
+	  Recommended for driver developers only.
+
+	  If in doubt, say "N".
@@ -55,7 +55,9 @@ i915-y += intel_audio.o \
 	  intel_atomic.o \
 	  intel_atomic_plane.o \
 	  intel_bios.o \
+	  intel_color.o \
 	  intel_display.o \
+	  intel_dpll_mgr.o \
 	  intel_fbc.o \
 	  intel_fifo_underrun.o \
 	  intel_frontbuffer.o \
......
@@ -66,6 +66,11 @@ static struct drm_driver driver;
 #define IVB_CURSOR_OFFSETS \
 	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
 
+#define BDW_COLORS \
+	.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define CHV_COLORS \
+	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
@@ -288,24 +293,28 @@ static const struct intel_device_info intel_haswell_m_info = {
 	.is_mobile = 1,
 };
 
+#define BDW_FEATURES \
+	HSW_FEATURES, \
+	BDW_COLORS
+
 static const struct intel_device_info intel_broadwell_d_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.gen = 8,
 };
 
 static const struct intel_device_info intel_broadwell_m_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.gen = 8, .is_mobile = 1,
 };
 
 static const struct intel_device_info intel_broadwell_gt3d_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.gen = 8,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
 static const struct intel_device_info intel_broadwell_gt3m_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.gen = 8, .is_mobile = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
@@ -318,16 +327,17 @@ static const struct intel_device_info intel_cherryview_info = {
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 	GEN_CHV_PIPEOFFSETS,
 	CURSOR_OFFSETS,
+	CHV_COLORS,
 };
 
 static const struct intel_device_info intel_skylake_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.is_skylake = 1,
 	.gen = 9,
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.is_skylake = 1,
 	.gen = 9,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -345,17 +355,18 @@ static const struct intel_device_info intel_broxton_info = {
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
+	BDW_COLORS,
 };
 
 static const struct intel_device_info intel_kabylake_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.is_preliminary = 1,
 	.is_kabylake = 1,
 	.gen = 9,
 };
 
 static const struct intel_device_info intel_kabylake_gt3_info = {
-	HSW_FEATURES,
+	BDW_FEATURES,
 	.is_preliminary = 1,
 	.is_kabylake = 1,
 	.gen = 9,
@@ -504,6 +515,7 @@ void intel_detect_pch(struct drm_device *dev)
 				WARN_ON(!IS_SKYLAKE(dev) &&
 					!IS_KABYLAKE(dev));
 			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
 				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
 				    pch->subsystem_vendor == 0x1af4 &&
 				    pch->subsystem_device == 0x1100)) {
@@ -758,10 +770,10 @@ static int i915_drm_resume(struct drm_device *dev)
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	intel_display_resume(dev);
-
 	intel_dp_mst_resume(dev);
 
+	intel_display_resume(dev);
+
 	/*
 	 * ... but also need to make sure that hotplug processing
 	 * doesn't cause havoc. Like in the driver load code we don't
@@ -881,7 +893,7 @@ int i915_reset(struct drm_device *dev)
 
 	simulated = dev_priv->gpu_error.stop_rings != 0;
 
-	ret = intel_gpu_reset(dev);
+	ret = intel_gpu_reset(dev, ALL_ENGINES);
 
 	/* Also reset the gpu hangman. */
 	if (simulated) {
......
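The device-info hunks above lean on a small C idiom: designated-initializer fragments stitched together with macros, so BDW_FEATURES expands to HSW_FEATURES plus BDW_COLORS and each table entry stays one line. A stripped-down, compilable sketch of the idiom with invented field names:

    /* Sketch of the macro-composition idiom used by the device info
     * tables; struct and field names here are invented. */
    struct device_info {
            int gen;
            int degamma_lut_size;
            int gamma_lut_size;
            int is_mobile;
    };

    #define BASE_FEATURES \
            .gen = 8

    #define COLOR_FEATURES \
            .degamma_lut_size = 512, .gamma_lut_size = 512

    #define FULL_FEATURES \
            BASE_FEATURES, \
            COLOR_FEATURES

    static const struct device_info example_info = {
            FULL_FEATURES,
            .is_mobile = 1, /* entries extend the shared fragment */
    };

Because designated initializers may appear in any order and unset fields default to zero, each platform entry only spells out what differs from the shared fragment.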
@@ -36,29 +36,29 @@ i915_verify_lists(struct drm_device *dev)
 	static int warned;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	int err = 0;
-	int i;
 
 	if (warned)
 		return 0;
 
-	for_each_ring(ring, dev_priv, i) {
-		list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
+	for_each_engine(engine, dev_priv) {
+		list_for_each_entry(obj, &engine->active_list,
+				    engine_list[engine->id]) {
 			if (obj->base.dev != dev ||
 			    !atomic_read(&obj->base.refcount.refcount)) {
 				DRM_ERROR("%s: freed active obj %p\n",
-					  ring->name, obj);
+					  engine->name, obj);
 				err++;
 				break;
 			} else if (!obj->active ||
-				   obj->last_read_req[ring->id] == NULL) {
+				   obj->last_read_req[engine->id] == NULL) {
 				DRM_ERROR("%s: invalid active obj %p\n",
-					  ring->name, obj);
+					  engine->name, obj);
 				err++;
 			} else if (obj->base.write_domain) {
 				DRM_ERROR("%s: invalid write obj %p (w %x)\n",
-					  ring->name,
+					  engine->name,
 					  obj, obj->base.write_domain);
 				err++;
 			}
......
@@ -135,16 +135,13 @@ enum i915_ggtt_view_type {
 };
 
 struct intel_rotation_info {
-	unsigned int height;
-	unsigned int pitch;
 	unsigned int uv_offset;
 	uint32_t pixel_format;
-	uint64_t fb_modifier;
-	unsigned int width_pages, height_pages;
-	uint64_t size;
-	unsigned int width_pages_uv, height_pages_uv;
-	uint64_t size_uv;
 	unsigned int uv_start_page;
+	struct {
+		/* tiles */
+		unsigned int width, height;
+	} plane[2];
 };
 
 struct i915_ggtt_view {
@@ -342,13 +339,14 @@ struct i915_address_space {
  * and correct (in cases like swizzling). That region is referred to as GMADR in
  * the spec.
  */
-struct i915_gtt {
+struct i915_ggtt {
 	struct i915_address_space base;
 
 	size_t stolen_size;		/* Total size of stolen memory */
 	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
 	size_t stolen_reserved_base;
 	size_t stolen_reserved_size;
+	size_t size;			/* Total size of Global GTT */
 	u64 mappable_end;		/* End offset that we can CPU map */
 	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
@@ -360,10 +358,7 @@ struct i915_gtt {
 	int mtrr;
 
-	/* global gtt ops */
-	int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
-			 size_t *stolen, phys_addr_t *mappable_base,
-			 u64 *mappable_end);
+	int (*probe)(struct i915_ggtt *ggtt);
 };
 
 struct i915_hw_ppgtt {
......
@@ -169,15 +169,15 @@ void i915_gem_render_state_fini(struct render_state *so)
 	drm_gem_object_unreference(&so->obj->base);
 }
 
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 				  struct render_state *so)
 {
 	int ret;
 
-	if (WARN_ON(ring->id != RCS))
+	if (WARN_ON(engine->id != RCS))
 		return -ENOENT;
 
-	ret = render_state_init(so, ring->dev);
+	ret = render_state_init(so, engine->dev);
 	if (ret)
 		return ret;
 
@@ -198,21 +198,21 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	struct render_state so;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(req->ring, &so);
+	ret = i915_gem_render_state_prepare(req->engine, &so);
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
-					     so.rodata->batch_items * 4,
-					     I915_DISPATCH_SECURE);
+	ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
+					       so.rodata->batch_items * 4,
+					       I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
 	if (so.aux_batch_size > 8) {
-		ret = req->ring->dispatch_execbuffer(req,
-						     (so.ggtt_offset +
-						      so.aux_batch_offset),
-						     so.aux_batch_size,
+		ret = req->engine->dispatch_execbuffer(req,
+						       (so.ggtt_offset +
+							so.aux_batch_offset),
+						       so.aux_batch_size,
......
@@ -43,7 +43,7 @@ struct render_state {
 int i915_gem_render_state_init(struct drm_i915_gem_request *req);
 void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 				  struct render_state *so);
 
 #endif /* _I915_GEM_RENDER_STATE_H_ */
@@ -74,7 +74,7 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
 {
 	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
 						    alignment, 0,
-						    dev_priv->gtt.stolen_usable_size);
+						    dev_priv->ggtt.stolen_usable_size);
 }
 
 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
@@ -134,7 +134,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 				 I85X_DRB3, &tmp);
 		tom = tmp * MB(32);
 
-		base = tom - tseg_size - dev_priv->gtt.stolen_size;
+		base = tom - tseg_size - dev_priv->ggtt.stolen_size;
 	} else if (IS_845G(dev)) {
 		u32 tseg_size = 0;
 		u32 tom;
@@ -158,7 +158,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 				 I830_DRB3, &tmp);
 		tom = tmp * MB(32);
 
-		base = tom - tseg_size - dev_priv->gtt.stolen_size;
+		base = tom - tseg_size - dev_priv->ggtt.stolen_size;
 	} else if (IS_I830(dev)) {
 		u32 tseg_size = 0;
 		u32 tom;
@@ -178,7 +178,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 				 I830_DRB3, &tmp);
 		tom = tmp * MB(32);
 
-		base = tom - tseg_size - dev_priv->gtt.stolen_size;
+		base = tom - tseg_size - dev_priv->ggtt.stolen_size;
 	}
 
 	if (base == 0)
@@ -189,8 +189,8 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		struct {
 			u32 start, end;
 		} stolen[2] = {
-			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
-			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
+			{ .start = base, .end = base + dev_priv->ggtt.stolen_size, },
+			{ .start = base, .end = base + dev_priv->ggtt.stolen_size, },
 		};
 		u64 gtt_start, gtt_end;
 
@@ -200,7 +200,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 				     (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
 		else
 			gtt_start &= PGTBL_ADDRESS_LO_MASK;
-		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+		gtt_end = gtt_start + gtt_total_entries(dev_priv->ggtt) * 4;
 
 		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
 			stolen[0].end = gtt_start;
@@ -211,10 +211,10 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		if (stolen[0].end - stolen[0].start >
 		    stolen[1].end - stolen[1].start) {
 			base = stolen[0].start;
-			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+			dev_priv->ggtt.stolen_size = stolen[0].end - stolen[0].start;
 		} else {
 			base = stolen[1].start;
-			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+			dev_priv->ggtt.stolen_size = stolen[1].end - stolen[1].start;
 		}
 
 		if (stolen[0].start != stolen[1].start ||
@@ -223,7 +223,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 				      (unsigned long long) gtt_start,
 				      (unsigned long long) gtt_end - 1);
 			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
-				      base, base + (u32) dev_priv->gtt.stolen_size - 1);
+				      base, base + (u32) dev_priv->ggtt.stolen_size - 1);
 		}
 	}
 
@@ -233,7 +233,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 	 * kernel. So if the region is already marked as busy, something
 	 * is seriously wrong.
 	 */
-	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+	r = devm_request_mem_region(dev->dev, base, dev_priv->ggtt.stolen_size,
 				    "Graphics Stolen Memory");
 	if (r == NULL) {
 		/*
@@ -245,7 +245,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		 * reservation starting from 1 instead of 0.
 		 */
 		r = devm_request_mem_region(dev->dev, base + 1,
-					    dev_priv->gtt.stolen_size - 1,
+					    dev_priv->ggtt.stolen_size - 1,
 					    "Graphics Stolen Memory");
 		/*
 		 * GEN3 firmware likes to smash pci bridges into the stolen
@@ -253,7 +253,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		 */
 		if (r == NULL && !IS_GEN3(dev)) {
 			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
-				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
+				  base, base + (uint32_t)dev_priv->ggtt.stolen_size);
 			base = 0;
 		}
 	}
@@ -278,7 +278,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
 				     CTG_STOLEN_RESERVED :
 				     ELK_STOLEN_RESERVED);
 	unsigned long stolen_top = dev_priv->mm.stolen_base +
-		dev_priv->gtt.stolen_size;
+		dev_priv->ggtt.stolen_size;
 
 	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
 
@@ -372,7 +372,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
 	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
 	unsigned long stolen_top;
 
-	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+	stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size;
 
 	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
 
@@ -401,14 +401,14 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	}
 #endif
 
-	if (dev_priv->gtt.stolen_size == 0)
+	if (dev_priv->ggtt.stolen_size == 0)
 		return 0;
 
 	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
 	if (dev_priv->mm.stolen_base == 0)
 		return 0;
 
-	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+	stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size;
 
 	switch (INTEL_INFO(dev_priv)->gen) {
 	case 2:
@@ -458,18 +458,18 @@ int i915_gem_init_stolen(struct drm_device *dev)
 		return 0;
 	}
 
-	dev_priv->gtt.stolen_reserved_base = reserved_base;
-	dev_priv->gtt.stolen_reserved_size = reserved_size;
+	dev_priv->ggtt.stolen_reserved_base = reserved_base;
+	dev_priv->ggtt.stolen_reserved_size = reserved_size;
 
 	/* It is possible for the reserved area to end before the end of stolen
 	 * memory, so just consider the start. */
 	reserved_total = stolen_top - reserved_base;
 
 	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
-		      dev_priv->gtt.stolen_size >> 10,
-		      (dev_priv->gtt.stolen_size - reserved_total) >> 10);
+		      dev_priv->ggtt.stolen_size >> 10,
+		      (dev_priv->ggtt.stolen_size - reserved_total) >> 10);
 
-	dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
+	dev_priv->ggtt.stolen_usable_size = dev_priv->ggtt.stolen_size -
 		reserved_total;
 
 	/*
@@ -483,7 +483,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
 	 * problem later.
 	 */
-	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
+	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->ggtt.stolen_usable_size);
 
 	return 0;
 }
@@ -497,7 +497,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	struct scatterlist *sg;
 
 	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
-	BUG_ON(offset > dev_priv->gtt.stolen_size - size);
+	BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
 
 	/* We hide that we have no struct page backing our stolen object
 	 * by wrapping the contiguous physical allocation with a fake
@@ -629,7 +629,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 					       u32 size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *ggtt = &dev_priv->gtt.base;
+	struct i915_address_space *ggtt = &dev_priv->ggtt.base;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
 	struct i915_vma *vma;
......
@@ -758,6 +758,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 	int ret;
 	u32 handle;
 
+	if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
+		/* We cannot support coherent userptr objects on hw without
+		 * LLC and broken snooping.
+		 */
+		return -ENODEV;
+	}
+
 	if (args->flags & ~(I915_USERPTR_READ_ONLY |
 			    I915_USERPTR_UNSYNCHRONIZED))
 		return -EINVAL;
......
@@ -377,11 +377,11 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx = client->owner;
 	struct guc_context_desc desc;
 	struct sg_table *sg;
-	int i;
+	enum intel_engine_id id;
 
 	memset(&desc, 0, sizeof(desc));
 
@@ -390,8 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;
 
-	for_each_ring(ring, dev_priv, i) {
-		struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
+	for_each_engine_id(engine, dev_priv, id) {
+		struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
 		struct drm_i915_gem_object *obj;
 		uint64_t ctx_desc;
 
@@ -402,27 +402,27 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		 * for now who owns a GuC client. But for future owner of GuC
 		 * client, need to make sure lrc is pinned prior to enter here.
 		 */
-		obj = ctx->engine[i].state;
+		obj = ctx->engine[id].state;
 		if (!obj)
 			break;	/* XXX: continue? */
 
-		ctx_desc = intel_lr_context_descriptor(ctx, ring);
+		ctx_desc = intel_lr_context_descriptor(ctx, engine);
 		lrc->context_desc = (u32)ctx_desc;
 
 		/* The state page is after PPHWSP */
 		lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
 				LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
-				(ring->guc_id << GUC_ELC_ENGINE_OFFSET);
+				(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-		obj = ctx->engine[i].ringbuf->obj;
+		obj = ctx->engine[id].ringbuf->obj;
 
 		lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
 		lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
 		lrc->ring_next_free_location = lrc->ring_begin;
 		lrc->ring_current_tail_pointer_value = 0;
 
-		desc.engines_used |= (1 << ring->guc_id);
+		desc.engines_used |= (1 << engine->guc_id);
 	}
 
 	WARN_ON(desc.engines_used == 0);
@@ -542,11 +542,12 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
 	wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
 	wqi->header = WQ_TYPE_INORDER |
 			(wq_len << WQ_LEN_SHIFT) |
-			(rq->ring->guc_id << WQ_TARGET_SHIFT) |
+			(rq->engine->guc_id << WQ_TARGET_SHIFT) |
 			WQ_NO_WCFLUSH_WAIT;
 
 	/* The GuC wants only the low-order word of the context descriptor */
-	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
+	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
+							     rq->engine);
 
 	/* The GuC firmware wants the tail index in QWords, not bytes */
 	tail = rq->ringbuf->tail >> 3;
@@ -569,7 +570,7 @@ int i915_guc_submit(struct i915_guc_client *client,
 		    struct drm_i915_gem_request *rq)
 {
 	struct intel_guc *guc = client->guc;
-	unsigned int engine_id = rq->ring->guc_id;
+	unsigned int engine_id = rq->engine->guc_id;
 	int q_ret, b_ret;
 
 	q_ret = guc_add_workqueue_item(client, rq);
@@ -839,9 +840,9 @@ static void guc_create_ads(struct intel_guc *guc)
 	struct guc_ads *ads;
 	struct guc_policies *policies;
 	struct guc_mmio_reg_state *reg_state;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct page *page;
-	u32 size, i;
+	u32 size;
 
 	/* The ads obj includes the struct itself and buffers passed to GuC */
 	size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
@@ -867,11 +868,11 @@ static void guc_create_ads(struct intel_guc *guc)
 	 * so its address won't change after we've told the GuC where
 	 * to find it.
 	 */
-	ring = &dev_priv->ring[RCS];
-	ads->golden_context_lrca = ring->status_page.gfx_addr;
+	engine = &dev_priv->engine[RCS];
+	ads->golden_context_lrca = engine->status_page.gfx_addr;
 
-	for_each_ring(ring, dev_priv, i)
-		ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
+	for_each_engine(engine, dev_priv)
+		ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
 
 	/* GuC scheduling policies */
 	policies = (void *)ads + sizeof(struct guc_ads);
@@ -883,12 +884,12 @@ static void guc_create_ads(struct intel_guc *guc)
 	/* MMIO reg state */
 	reg_state = (void *)policies + sizeof(struct guc_policies);
 
-	for_each_ring(ring, dev_priv, i) {
-		reg_state->mmio_white_list[ring->guc_id].mmio_start =
-			ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
+	for_each_engine(engine, dev_priv) {
+		reg_state->mmio_white_list[engine->guc_id].mmio_start =
+			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
 
 		/* Nothing to be saved or restored for now. */
-		reg_state->mmio_white_list[ring->guc_id].count = 0;
+		reg_state->mmio_white_list[engine->guc_id].count = 0;
 	}
 
 	ads->reg_state_addr = ads->scheduler_policies +
......
@@ -56,6 +56,8 @@ struct i915_params i915 __read_mostly = {
 	.edp_vswing = 0,
 	.enable_guc_submission = false,
 	.guc_log_level = -1,
+	.enable_dp_mst = true,
+	.inject_load_failure = 0,
 };
 
 module_param_named(modeset, i915.modeset, int, 0400);
@@ -201,3 +203,10 @@ MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)")
 module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
 MODULE_PARM_DESC(guc_log_level,
 	"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
+
+module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600);
+MODULE_PARM_DESC(enable_dp_mst,
+	"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
+
+module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
+MODULE_PARM_DESC(inject_load_failure,
+	"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
@@ -49,6 +49,7 @@ struct i915_params {
 	int use_mmio_flip;
 	int mmio_debug;
 	int edp_vswing;
+	unsigned int inject_load_failure;
 	/* leave bools at the end to not create holes */
 	bool enable_hangcheck;
 	bool fastboot;
@@ -59,6 +60,7 @@ struct i915_params {
 	bool enable_guc_submission;
 	bool verbose_state_checks;
 	bool nuclear_pageflip;
+	bool enable_dp_mst;
 };
 
 extern struct i915_params i915 __read_mostly;
......
@@ -370,6 +370,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
+	intel_runtime_pm_get(dev_priv);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	val = intel_freq_opcode(dev_priv, val);
@@ -378,6 +380,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	    val > dev_priv->rps.max_freq ||
 	    val < dev_priv->rps.min_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
+		intel_runtime_pm_put(dev_priv);
 		return -EINVAL;
 	}
 
@@ -398,6 +401,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
+	intel_runtime_pm_put(dev_priv);
+
 	return count;
 }
 
@@ -433,6 +438,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
+	intel_runtime_pm_get(dev_priv);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	val = intel_freq_opcode(dev_priv, val);
@@ -441,6 +448,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	    val > dev_priv->rps.max_freq ||
 	    val > dev_priv->rps.max_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
+		intel_runtime_pm_put(dev_priv);
 		return -EINVAL;
 	}
 
@@ -457,6 +465,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
+	intel_runtime_pm_put(dev_priv);
+
 	return count;
}
......
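The sysfs hunks above all apply one pattern: take a runtime-PM wakeref before grabbing rps.hw_lock, and release it on every exit path, including the -EINVAL returns. A condensed sketch of the pattern (illustrative kernel-style code, using a single unwind label instead of the duplicated unlock/put in the actual diff):

    /* Sketch only: every path out of the function, including the error
     * path, must balance intel_runtime_pm_get() with a matching put. */
    static ssize_t freq_store_pattern(struct drm_i915_private *dev_priv, u32 val)
    {
            ssize_t ret = 0;

            intel_runtime_pm_get(dev_priv);        /* keep the device awake */
            mutex_lock(&dev_priv->rps.hw_lock);

            if (val > dev_priv->rps.max_freq) {    /* validate under the lock */
                    ret = -EINVAL;
                    goto out;                      /* error path still unwinds */
            }

            /* ... program the new frequency here ... */

    out:
            mutex_unlock(&dev_priv->rps.hw_lock);
            intel_runtime_pm_put(dev_priv);        /* always balanced */
            return ret;
    }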
@@ -464,7 +464,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 	    TP_fast_assign(
 			   __entry->dev = from->dev->primary->index;
 			   __entry->sync_from = from->id;
-			   __entry->sync_to = to_req->ring->id;
+			   __entry->sync_to = to_req->engine->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   ),
 
@@ -486,13 +486,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 	    ),
 
 	    TP_fast_assign(
-			   struct intel_engine_cs *ring =
-						i915_gem_request_get_ring(req);
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
+			   struct intel_engine_cs *engine =
+						i915_gem_request_get_engine(req);
+			   __entry->dev = engine->dev->primary->index;
+			   __entry->ring = engine->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   __entry->flags = flags;
-			   i915_trace_irq_get(ring, req);
+			   i915_trace_irq_get(engine, req);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,8 +511,8 @@ TRACE_EVENT(i915_gem_ring_flush,
 	    ),
 
 	    TP_fast_assign(
-			   __entry->dev = req->ring->dev->primary->index;
-			   __entry->ring = req->ring->id;
+			   __entry->dev = req->engine->dev->primary->index;
+			   __entry->ring = req->engine->id;
 			   __entry->invalidate = invalidate;
 			   __entry->flush = flush;
 			   ),
 
@@ -533,10 +533,10 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 	    ),
 
 	    TP_fast_assign(
-			   struct intel_engine_cs *ring =
-						i915_gem_request_get_ring(req);
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
+			   struct intel_engine_cs *engine =
+						i915_gem_request_get_engine(req);
+			   __entry->dev = engine->dev->primary->index;
+			   __entry->ring = engine->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   ),
 
@@ -550,8 +550,8 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
 );
 
 TRACE_EVENT(i915_gem_request_notify,
-	    TP_PROTO(struct intel_engine_cs *ring),
-	    TP_ARGS(ring),
+	    TP_PROTO(struct intel_engine_cs *engine),
+	    TP_ARGS(engine),
 
 	    TP_STRUCT__entry(
			     __field(u32, dev)
@@ -560,9 +560,9 @@ TRACE_EVENT(i915_gem_request_notify,
 	    ),
 
 	    TP_fast_assign(
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
-			   __entry->seqno = ring->get_seqno(ring, false);
+			   __entry->dev = engine->dev->primary->index;
+			   __entry->ring = engine->id;
+			   __entry->seqno = engine->get_seqno(engine, false);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -597,13 +597,13 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 	     * less desirable.
 	     */
 	    TP_fast_assign(
-			   struct intel_engine_cs *ring =
-						i915_gem_request_get_ring(req);
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
+			   struct intel_engine_cs *engine =
+						i915_gem_request_get_engine(req);
+			   __entry->dev = engine->dev->primary->index;
+			   __entry->ring = engine->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   __entry->blocking =
-				     mutex_is_locked(&ring->dev->struct_mutex);
+				     mutex_is_locked(&engine->dev->struct_mutex);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -777,9 +777,9 @@ DEFINE_EVENT(i915_context, i915_context_free,
  * called only if full ppgtt is enabled.
  */
 TRACE_EVENT(switch_mm,
-	TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
+	TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to),
 
-	TP_ARGS(ring, to),
+	TP_ARGS(engine, to),
 
 	TP_STRUCT__entry(
			__field(u32, ring)
@@ -789,10 +789,10 @@ TRACE_EVENT(switch_mm,
 	),
 
 	TP_fast_assign(
-			__entry->ring = ring->id;
+			__entry->ring = engine->id;
 			__entry->to = to;
 			__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
-			__entry->dev = ring->dev->primary->index;
+			__entry->dev = engine->dev->primary->index;
 	),
 
 	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
......
@@ -181,7 +181,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
 int intel_vgt_balloon(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+	struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
 	unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
 
 	unsigned long mappable_base, mappable_size, mappable_end;
@@ -203,18 +203,18 @@ int intel_vgt_balloon(struct drm_device *dev)
 		 unmappable_base, unmappable_size / 1024);
 
 	if (mappable_base < ggtt_vm->start ||
-	    mappable_end > dev_priv->gtt.mappable_end ||
-	    unmappable_base < dev_priv->gtt.mappable_end ||
+	    mappable_end > dev_priv->ggtt.mappable_end ||
+	    unmappable_base < dev_priv->ggtt.mappable_end ||
 	    unmappable_end > ggtt_vm_end) {
 		DRM_ERROR("Invalid ballooning configuration!\n");
 		return -EINVAL;
 	}
 
 	/* Unmappable graphic memory ballooning */
-	if (unmappable_base > dev_priv->gtt.mappable_end) {
+	if (unmappable_base > dev_priv->ggtt.mappable_end) {
 		ret = vgt_balloon_space(&ggtt_vm->mm,
 					&bl_info.space[2],
-					dev_priv->gtt.mappable_end,
+					dev_priv->ggtt.mappable_end,
 					unmappable_base);
 
 		if (ret)
@@ -244,11 +244,11 @@ int intel_vgt_balloon(struct drm_device *dev)
 			goto err;
 	}
 
-	if (mappable_end < dev_priv->gtt.mappable_end) {
+	if (mappable_end < dev_priv->ggtt.mappable_end) {
 		ret = vgt_balloon_space(&ggtt_vm->mm,
 					&bl_info.space[1],
 					mappable_end,
-					dev_priv->gtt.mappable_end);
+					dev_priv->ggtt.mappable_end);
 
 		if (ret)
 			goto err;
......
@@ -96,8 +96,11 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
 	crtc_state->update_pipe = false;
 	crtc_state->disable_lp_wm = false;
 	crtc_state->disable_cxsr = false;
-	crtc_state->wm_changed = false;
+	crtc_state->update_wm_pre = false;
+	crtc_state->update_wm_post = false;
 	crtc_state->fb_changed = false;
+	crtc_state->wm.need_postvbl_update = false;
+	crtc_state->fb_bits = 0;
 
 	return &crtc_state->base;
 }
......
@@ -195,12 +195,10 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
 	struct intel_plane_state *intel_state =
 		to_intel_plane_state(plane->state);
 	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
-	struct drm_crtc_state *crtc_state =
-		drm_atomic_get_existing_crtc_state(old_state->state, crtc);
 
 	if (intel_state->visible)
 		intel_plane->update_plane(plane,
-					  to_intel_crtc_state(crtc_state),
+					  to_intel_crtc_state(crtc->state),
 					  intel_state);
 	else
 		intel_plane->disable_plane(plane, crtc);
......
@@ -516,9 +516,9 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
 	 * underruns, even if that range is not reserved by the BIOS. */
 	if (IS_BROADWELL(dev_priv) ||
 	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
+		end = dev_priv->ggtt.stolen_size - 8 * 1024 * 1024;
 	else
-		end = dev_priv->gtt.stolen_usable_size;
+		end = dev_priv->ggtt.stolen_usable_size;
 
 	/* HACK: This code depends on what we will do in *_enable_fbc. If that
 	 * code changes, this code needs to change as well.
......