Commit 51d61207 authored by Dave Airlie

Merge branch 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2016-08-22:
- bugfixes and cleanups for rcu-protected requests (Chris)
- atomic modeset fixes for gpu reset on pre-g4x (Maarten&Ville)
- guc submission improvements (Dave Gordon)
- panel power sequence cleanup (Imre)
- better use of stolen and unmappable ggtt (Chris), plus prep work to make that
  happen
- rework of framebuffer offsets, prep for multi-plane framebuffers (Ville)
- fully partial ggtt vmaps, including fenced ones (Chris)
- move lots more of the gem tracking from the object to the vma (Chris)
- tune the command parser (Chris)
- allow fbc without fences on recent platforms (Chris)
- fbc frontbuffer tracking fixes (Chris)
- fast prefaulting using io-mapping.h pgprot caching (Chris)

* 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel: (141 commits)
  io-mapping: Fixup for different names of writecombine
  io-mapping.h: s/PAGE_KERNEL_IO/PAGE_KERNEL/
  drm/i915: Update DRIVER_DATE to 20160822
  drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass
  drm/i915: Embed the io-mapping struct inside drm_i915_private
  io-mapping: Always create a struct to hold metadata about the io-mapping
  drm/i915/fbc: Allow on unfenced surfaces, for recent gen
  drm/i915/fbc: Don't set an illegal fence if unfenced
  drm/i915: Flush delayed fence releases after reset
  drm/i915: Reattach comment, complete type specification
  drm/i915/cmdparser: Accelerate copies from WC memory
  drm/i915/cmdparser: Use binary search for faster register lookup
  drm/i915/cmdparser: Check for SKIP descriptors first
  drm/i915/cmdparser: Compare against the previous command descriptor
  drm/i915/cmdparser: Improve hash function
  drm/i915/cmdparser: Only cache the dst vmap
  drm/i915/cmdparser: Use cached vmappings
  drm/i915/cmdparser: Add the TIMESTAMP register for the other engines
  drm/i915/cmdparser: Make initialisation failure non-fatal
  drm/i915: Stop discarding GTT cache-domain on unbind vma
  ...
parents 78acdd4a 35124389
@@ -317,16 +317,11 @@ static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
 static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
					    size_t stolen_size)
 {
-	u16 toud;
+	u16 toud = 0;
 
-	/*
-	 * FIXME is the graphics stolen memory region
-	 * always at TOUD? Ie. is it always the last
-	 * one to be allocated by the BIOS?
-	 */
 	toud = read_pci_config_16(0, 0, 0, I865_TOUD);
 
-	return (phys_addr_t)toud << 16;
+	return (phys_addr_t)(toud << 16) + i845_tseg_size();
 }
 
 static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
...
@@ -845,6 +845,8 @@ void intel_gtt_insert_page(dma_addr_t addr,
			    unsigned int flags)
 {
	intel_private.driver->write_entry(addr, pg, flags);
+	if (intel_private.driver->chipset_flush)
+		intel_private.driver->chipset_flush();
 }
 EXPORT_SYMBOL(intel_gtt_insert_page);
...
@@ -3,12 +3,16 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
+subdir-ccflags-y += \
+	$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
 
 # Please keep these build lists sorted!
 
 # core driver code
 i915-y := i915_drv.o \
	  i915_irq.o \
+	  i915_memcpy.o \
+	  i915_mm.o \
	  i915_params.o \
	  i915_pci.o \
	  i915_suspend.o \
...
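Aside (not part of the diff): the new subdir-ccflags-y line uses kbuild's as-instr macro to probe whether the assembler accepts the SSE4.1 movntdqa instruction, and only then adds -DCONFIG_AS_MOVNTDQA, so the streaming-load copy path behind the new i915_memcpy.o can be compiled out on older toolchains. A minimal, hypothetical C sketch of how such a define is typically consumed; the function name and body are illustrative, not the actual i915_memcpy.c code:

	/* Hypothetical sketch only: gate the SSE4.1 fast path on both the
	 * build-time assembler probe and a run-time CPU feature check.
	 */
	static bool can_use_movntdqa(void)
	{
	#ifdef CONFIG_AS_MOVNTDQA
		return static_cpu_has(X86_FEATURE_XMM4_1); /* SSE4.1 available */
	#else
		return false; /* assembler cannot emit movntdqa; path compiled out */
	#endif
	}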
(2 file diffs collapsed)
@@ -827,6 +827,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
 
+	i915_memcpy_init_early(dev_priv);
+
	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;
@@ -1560,6 +1562,7 @@ static int i915_drm_resume(struct drm_device *dev)
	i915_gem_resume(dev);
 
	i915_restore_state(dev);
+	intel_pps_unlock_regs_wa(dev_priv);
	intel_opregion_setup(dev_priv);
 
	intel_init_pch_refclk(dev);
...
(2 file diffs collapsed)
@@ -155,9 +155,10 @@ void i915_gem_context_free(struct kref *ctx_ref)
		if (ce->ring)
			intel_ring_free(ce->ring);
 
-		i915_gem_object_put(ce->state);
+		i915_vma_put(ce->state);
	}
 
+	put_pid(ctx->pid);
	list_del(&ctx->link);
 
	ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
@@ -281,13 +282,24 @@ __create_hw_context(struct drm_device *dev,
	ctx->ggtt_alignment = get_context_alignment(dev_priv);
 
	if (dev_priv->hw_context_size) {
-		struct drm_i915_gem_object *obj =
-				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+		struct drm_i915_gem_object *obj;
+		struct i915_vma *vma;
+
+		obj = i915_gem_alloc_context_obj(dev,
+						 dev_priv->hw_context_size);
		if (IS_ERR(obj)) {
			ret = PTR_ERR(obj);
			goto err_out;
		}
-		ctx->engine[RCS].state = obj;
+
+		vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+		if (IS_ERR(vma)) {
+			i915_gem_object_put(obj);
+			ret = PTR_ERR(vma);
+			goto err_out;
+		}
+
+		ctx->engine[RCS].state = vma;
	}
 
	/* Default context will never have a file_priv */
@@ -300,6 +312,9 @@ __create_hw_context(struct drm_device *dev,
		ret = DEFAULT_CONTEXT_HANDLE;
 
	ctx->file_priv = file_priv;
+	if (file_priv)
+		ctx->pid = get_task_pid(current, PIDTYPE_PID);
+
	ctx->user_handle = ret;
 
	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
@@ -399,7 +414,7 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
	struct intel_context *ce = &ctx->engine[engine->id];
 
	if (ce->state)
-		i915_gem_object_ggtt_unpin(ce->state);
+		i915_vma_unpin(ce->state);
 
	i915_gem_context_put(ctx);
 }
@@ -568,7 +583,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
	const int num_rings =
		/* Use an extended w/a on ivb+ if signalling from other rings */
		i915.semaphores ?
-		hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
+		INTEL_INFO(dev_priv)->num_rings - 1 :
		0;
	int len, ret;
@@ -621,8 +636,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring,
-			i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
-			flags);
+			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -651,7 +665,8 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
					MI_STORE_REGISTER_MEM |
					MI_SRM_LRM_GLOBAL_GTT);
			intel_ring_emit_reg(ring, last_reg);
-			intel_ring_emit(ring, engine->scratch.gtt_offset);
+			intel_ring_emit(ring,
+					i915_ggtt_offset(engine->scratch));
			intel_ring_emit(ring, MI_NOOP);
		}
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
@@ -755,6 +770,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
	struct i915_gem_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+	struct i915_vma *vma = to->engine[RCS].state;
	struct i915_gem_context *from;
	u32 hw_flags;
	int ret, i;
@@ -762,9 +778,15 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
	if (skip_rcs_switch(ppgtt, engine, to))
		return 0;
 
+	/* Clear this page out of any CPU caches for coherent swap-in/out. */
+	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+		ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+		if (ret)
+			return ret;
+	}
+
	/* Trying to pin first makes error handling easier. */
-	ret = i915_gem_object_ggtt_pin(to->engine[RCS].state, NULL, 0,
-				       to->ggtt_alignment, 0);
+	ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
	if (ret)
		return ret;
@@ -777,18 +799,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
	 */
	from = engine->last_context;
 
-	/*
-	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
-	 * that thanks to write = false in this call and us not setting any gpu
-	 * write domains when putting a context object onto the active list
-	 * (when switching away from it), this won't block.
-	 *
-	 * XXX: We need a real interface to do this instead of trickery.
-	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
-	if (ret)
-		goto unpin_out;
-
	if (needs_pd_load_pre(ppgtt, engine, to)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
@@ -797,7 +807,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
-			goto unpin_out;
+			goto err;
	}
 
	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
@@ -814,7 +824,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
-			goto unpin_out;
+			goto err;
	}
 
	/* The backing object for the context is done after switching to the
@@ -824,8 +834,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
-		struct drm_i915_gem_object *obj = from->engine[RCS].state;
-
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
@@ -833,11 +841,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
-		obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
-
-		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(obj);
+		i915_vma_move_to_active(from->engine[RCS].state, req, 0);
+		/* state is kept alive until the next request */
+		i915_vma_unpin(from->engine[RCS].state);
 
		i915_gem_context_put(from);
	}
	engine->last_context = i915_gem_context_get(to);
@@ -882,8 +888,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 
	return 0;
 
-unpin_out:
-	i915_gem_object_ggtt_unpin(to->engine[RCS].state);
+err:
+	i915_vma_unpin(vma);
	return ret;
 }
...
@@ -119,7 +119,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
	if (ret)
		return ERR_PTR(ret);
 
-	addr = i915_gem_object_pin_map(obj);
+	addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	mutex_unlock(&dev->struct_mutex);
 
	return addr;
...
@@ -47,7 +47,7 @@ gpu_is_idle(struct drm_i915_private *dev_priv)
 }
 
 static bool
-mark_free(struct i915_vma *vma, struct list_head *unwind)
+mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
 {
	if (i915_vma_is_pinned(vma))
		return false;
@@ -55,6 +55,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;
 
+	if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
+		return false;
+
	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
 }
@@ -129,7 +132,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
-			if (mark_free(vma, &eviction_list))
+			if (mark_free(vma, flags, &eviction_list))
				goto found;
	} while (*++phase);
...
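Aside (not part of the diff): the new flags parameter lets eviction honour the PIN_NONFAULT bit added in i915_gem_gtt.h below; with it set, mark_free() refuses to scan out any vma whose object still has live userspace GTT mmaps (obj->fault_mappable), so an opportunistic pin avoids evicting buffers that would immediately fault straight back in. A hypothetical caller sketch; every argument except the flags is illustrative and the real call sites in this series may differ:

	/* Hypothetical sketch: opportunistic eviction that will not throw
	 * out objects with live userspace GTT mmaps.
	 */
	ret = i915_gem_evict_something(&ggtt->base, size, alignment,
				       I915_CACHE_NONE, 0, ggtt->mappable_end,
				       PIN_MAPPABLE | PIN_NONFAULT);
	if (ret)
		return ret;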
(3 file diffs collapsed)
@@ -38,7 +38,13 @@
 
 #include "i915_gem_request.h"
 
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
+
 struct drm_i915_file_private;
+struct drm_i915_fence_reg;
 
 typedef uint32_t gen6_pte_t;
 typedef uint64_t gen8_pte_t;
@@ -139,12 +145,9 @@ enum i915_ggtt_view_type {
 };
 
 struct intel_rotation_info {
-	unsigned int uv_offset;
-	uint32_t pixel_format;
-	unsigned int uv_start_page;
	struct {
		/* tiles */
-		unsigned int width, height;
+		unsigned int width, height, stride, offset;
	} plane[2];
 };
 
@@ -158,8 +161,6 @@ struct i915_ggtt_view {
		} partial;
		struct intel_rotation_info rotated;
	} params;
-
-	struct sg_table *pages;
 };
 
 extern const struct i915_ggtt_view i915_ggtt_view_normal;
@@ -179,8 +180,11 @@ struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
+	struct drm_i915_fence_reg *fence;
+	struct sg_table *pages;
	void __iomem *iomap;
	u64 size;
+	u64 display_alignment;
 
	unsigned int flags;
	/**
@@ -202,10 +206,12 @@ struct i915_vma {
 #define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
 
 #define I915_VMA_GGTT		BIT(8)
-#define I915_VMA_CLOSED		BIT(9)
+#define I915_VMA_CAN_FENCE	BIT(9)
+#define I915_VMA_CLOSED		BIT(10)
 
	unsigned int active;
	struct i915_gem_active last_read[I915_NUM_ENGINES];
+	struct i915_gem_active last_fence;
 
	/**
	 * Support different GGTT views into the same object.
@@ -232,11 +238,22 @@ struct i915_vma {
	struct drm_i915_gem_exec_object2 *exec_entry;
 };
 
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+		struct i915_address_space *vm,
+		const struct i915_ggtt_view *view);
+void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+
 static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
 {
	return vma->flags & I915_VMA_GGTT;
 }
 
+static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
+{
+	return vma->flags & I915_VMA_CAN_FENCE;
+}
+
 static inline bool i915_vma_is_closed(const struct i915_vma *vma)
 {
	return vma->flags & I915_VMA_CLOSED;
@@ -270,6 +287,15 @@ static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
	return vma->active & BIT(engine);
 }
 
+static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
+{
+	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+	GEM_BUG_ON(!vma->node.allocated);
+	GEM_BUG_ON(upper_32_bits(vma->node.start));
+	GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
+	return lower_32_bits(vma->node.start);
+}
+
 struct i915_page_dma {
	struct page *page;
	union {
@@ -413,13 +439,13 @@ struct i915_address_space {
  */
 struct i915_ggtt {
	struct i915_address_space base;
+	struct io_mapping mappable;	/* Mapping to our CPU mappable region */
 
	size_t stolen_size;		/* Total size of stolen memory */
	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
	size_t stolen_reserved_base;
	size_t stolen_reserved_size;
	u64 mappable_end;		/* End offset that we can CPU map */
-	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */
 
	/** "Graphics Stolen Memory" holds the global PTEs */
@@ -608,24 +634,11 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 
-static inline bool
-i915_ggtt_view_equal(const struct i915_ggtt_view *a,
-		     const struct i915_ggtt_view *b)
-{
-	if (WARN_ON(!a || !b))
-		return false;
-
-	if (a->type != b->type)
-		return false;
-	if (a->type != I915_GGTT_VIEW_NORMAL)
-		return !memcmp(&a->params, &b->params, sizeof(a->params));
-	return true;
-}
-
 /* Flags used by pin/bind&friends. */
 #define PIN_NONBLOCK		BIT(0)
 #define PIN_MAPPABLE		BIT(1)
 #define PIN_ZONE_4G		BIT(2)
+#define PIN_NONFAULT		BIT(3)
 #define PIN_MBZ			BIT(5)	/* I915_VMA_PIN_OVERFLOW */
 #define PIN_GLOBAL		BIT(6)	/* I915_VMA_GLOBAL_BIND */
@@ -715,4 +728,10 @@ static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
	i915_vma_unpin(vma);
 }
 
+static inline struct page *i915_vma_first_page(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!vma->pages);
+	return sg_page(vma->pages->sgl);
+}
+
 #endif
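Aside (not part of the diff): the new i915_ggtt_offset() helper replaces open-coded i915_gem_obj_ggtt_offset() lookups (see the mi_set_context() hunk above). It returns only the low 32 bits of the vma's drm_mm node because global-GTT addresses are emitted into 32-bit command dwords, and the GEM_BUG_ONs assert that the vma really is a bound GGTT binding that fits below 4 GiB. A minimal usage sketch; the surrounding ring, vma and flags variables are assumed to exist:

	/* Hypothetical sketch: emit a pinned GGTT address as a 32-bit
	 * command dword, mirroring the MI_SET_CONTEXT change above.
	 */
	u32 ggtt_addr = i915_ggtt_offset(vma); /* BUGs if not a bound GGTT vma */

	intel_ring_emit(ring, ggtt_addr | flags);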
(35 file diffs collapsed)