Commit db56f974 authored by Tvrtko Ursulin

drm/i915: Eliminate dual personality of i915_scratch_offset

The scratch vma lives under gt, but the API operated on i915. Make this
consistent by renaming the function to intel_gt_scratch_offset and
making it take struct intel_gt.

v2:
 * Move to intel_gt. (Chris)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-33-tvrtko.ursulin@linux.intel.com
parent f0c02c1b
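
The shape of the change, for readers skimming the diff: the helper moves
from the top-level device structure to the sub-structure that actually
owns the scratch vma. Below is a minimal standalone sketch of that
pattern (illustrative C only; the *_ish types are simplified stand-ins,
not the kernel's real structs):

	#include <stdint.h>

	struct vma_ish { uint32_t ggtt_offset; };   /* stand-in for struct i915_vma */
	struct gt_ish { struct vma_ish *scratch; }; /* scratch lives under the gt  */
	struct i915_ish { struct gt_ish gt; };      /* stand-in top-level device   */

	/* Before: keyed on the whole device, reaching through it into ->gt. */
	static inline uint32_t scratch_offset_old(const struct i915_ish *i915)
	{
		return i915->gt.scratch->ggtt_offset;
	}

	/* After: keyed on the gt that owns the vma. */
	static inline uint32_t scratch_offset_new(const struct gt_ish *gt)
	{
		return gt->scratch->ggtt_offset;
	}

Call sites then pass the gt they already hold (engine->gt or
rq->engine->gt) instead of a struct drm_i915_private pointer, which is
exactly the substitution the hunks below perform.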
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -734,7 +734,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
 	struct measure_breadcrumb *frame;
 	int dw = -ENOMEM;
 
-	GEM_BUG_ON(!engine->i915->gt.scratch);
+	GEM_BUG_ON(!engine->gt->scratch);
 
 	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
 	if (!frame)
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -203,3 +203,41 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
 	if (INTEL_GEN(gt->i915) < 6)
 		intel_gtt_chipset_flush();
 }
+
+int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int ret;
+
+	obj = i915_gem_object_create_stolen(i915, size);
+	if (!obj)
+		obj = i915_gem_object_create_internal(i915, size);
+	if (IS_ERR(obj)) {
+		DRM_ERROR("Failed to allocate scratch page\n");
+		return PTR_ERR(obj);
+	}
+
+	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_unref;
+	}
+
+	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	if (ret)
+		goto err_unref;
+
+	gt->scratch = vma;
+	return 0;
+
+err_unref:
+	i915_gem_object_put(obj);
+	return ret;
+}
+
+void intel_gt_fini_scratch(struct intel_gt *gt)
+{
+	i915_vma_unpin_and_release(&gt->scratch, 0);
+}
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -21,4 +21,12 @@ void intel_gt_clear_error_registers(struct intel_gt *gt,
 void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
 void intel_gt_chipset_flush(struct intel_gt *gt);
 
+int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size);
+void intel_gt_fini_scratch(struct intel_gt *gt);
+
+static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt)
+{
+	return i915_ggtt_offset(gt->scratch);
+}
+
 #endif /* __INTEL_GT_H__ */
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -135,6 +135,7 @@
 #include "gem/i915_gem_context.h"
 
+#include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_render_state.h"
 #include "i915_vgpu.h"
@@ -1756,7 +1757,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 	/* NB no one else is allowed to scribble over scratch + 256! */
 	*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = i915_scratch_offset(engine->i915) + 256;
+	*batch++ = intel_gt_scratch_offset(engine->gt) + 256;
 	*batch++ = 0;
 
 	*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1770,7 +1771,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 	*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-	*batch++ = i915_scratch_offset(engine->i915) + 256;
+	*batch++ = intel_gt_scratch_offset(engine->gt) + 256;
 	*batch++ = 0;
 
 	return batch;
@@ -1807,7 +1808,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 				       PIPE_CONTROL_GLOBAL_GTT_IVB |
 				       PIPE_CONTROL_CS_STALL |
 				       PIPE_CONTROL_QW_WRITE,
-				       i915_scratch_offset(engine->i915) +
+				       intel_gt_scratch_offset(engine->gt) +
 				       2 * CACHELINE_BYTES);
 
 	*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2501,7 +2502,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
 {
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr =
-		i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
+		intel_gt_scratch_offset(engine->gt) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
 	u32 *cs, flags = 0;
 	int len;
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -33,6 +33,8 @@
 #include "gem/i915_gem_context.h"
 
+#include "gt/intel_gt.h"
+
 #include "i915_drv.h"
 #include "i915_gem_render_state.h"
 #include "i915_trace.h"
@@ -75,7 +77,7 @@ gen2_render_ring_flush(struct i915_request *rq, u32 mode)
 	*cs++ = cmd;
 	while (num_store_dw--) {
 		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-		*cs++ = i915_scratch_offset(rq->i915);
+		*cs++ = intel_gt_scratch_offset(rq->engine->gt);
 		*cs++ = 0;
 	}
 	*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
@@ -148,7 +150,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 	 */
 	if (mode & EMIT_INVALIDATE) {
 		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+		*cs++ = intel_gt_scratch_offset(rq->engine->gt) |
+			PIPE_CONTROL_GLOBAL_GTT;
 		*cs++ = 0;
 		*cs++ = 0;
@@ -156,7 +159,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 		*cs++ = MI_FLUSH;
 
 		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+		*cs++ = intel_gt_scratch_offset(rq->engine->gt) |
+			PIPE_CONTROL_GLOBAL_GTT;
 		*cs++ = 0;
 		*cs++ = 0;
 	}
@@ -208,7 +212,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 static int
 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
 {
-	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+	u32 scratch_addr =
+		intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES;
 	u32 *cs;
 
 	cs = intel_ring_begin(rq, 6);
@@ -241,7 +246,8 @@ gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
 static int
 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+	u32 scratch_addr =
+		intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES;
 	u32 *cs, flags = 0;
 	int ret;
@@ -299,7 +305,8 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 	*cs++ = GFX_OP_PIPE_CONTROL(4);
 	*cs++ = PIPE_CONTROL_QW_WRITE;
-	*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = intel_gt_scratch_offset(rq->engine->gt) |
+		PIPE_CONTROL_GLOBAL_GTT;
 	*cs++ = 0;
 
 	/* Finally we can flush and with it emit the breadcrumb */
@@ -342,7 +349,8 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
 static int
 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+	u32 scratch_addr =
+		intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES;
 	u32 *cs, flags = 0;
 
 	/*
@@ -1071,9 +1079,9 @@ i830_emit_bb_start(struct i915_request *rq,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+	u32 *cs, cs_offset = intel_gt_scratch_offset(rq->engine->gt);
 
-	GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
+	GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
 
 	cs = intel_ring_begin(rq, 6);
 	if (IS_ERR(cs))
@@ -1513,7 +1521,7 @@ static int flush_pd_dir(struct i915_request *rq)
 	/* Stall until the page table load is complete */
 	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
-	*cs++ = i915_scratch_offset(rq->i915);
+	*cs++ = intel_gt_scratch_offset(rq->engine->gt);
 	*cs++ = MI_NOOP;
 
 	intel_ring_advance(rq, cs);
@@ -1629,7 +1637,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 			/* Insert a delay before the next switch! */
 			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 			*cs++ = i915_mmio_reg_offset(last_reg);
-			*cs++ = i915_scratch_offset(rq->i915);
+			*cs++ = intel_gt_scratch_offset(rq->engine->gt);
 			*cs++ = MI_NOOP;
 		}
 		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2783,11 +2783,6 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
 	return I915_HWS_CSB_WRITE_INDEX;
 }
 
-static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
-{
-	return i915_ggtt_offset(i915->gt.scratch);
-}
-
 static inline enum i915_map_type
 i915_coherent_map_type(struct drm_i915_private *i915)
 {
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1424,39 +1424,12 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 static int
 i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
 {
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	int ret;
-
-	obj = i915_gem_object_create_stolen(i915, size);
-	if (!obj)
-		obj = i915_gem_object_create_internal(i915, size);
-	if (IS_ERR(obj)) {
-		DRM_ERROR("Failed to allocate scratch page\n");
-		return PTR_ERR(obj);
-	}
-
-	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
-		goto err_unref;
-	}
-
-	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
-	if (ret)
-		goto err_unref;
-
-	i915->gt.scratch = vma;
-	return 0;
-
-err_unref:
-	i915_gem_object_put(obj);
-	return ret;
+	return intel_gt_init_scratch(&i915->gt, size);
 }
 
 static void i915_gem_fini_scratch(struct drm_i915_private *i915)
 {
-	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+	intel_gt_fini_scratch(&i915->gt);
 }
 
 static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1442,7 +1442,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 			if (HAS_BROKEN_CS_TLB(i915))
 				ee->wa_batchbuffer =
 					i915_error_object_create(i915,
-								 i915->gt.scratch);
+								 engine->gt->scratch);
 
 			request_record_user_bo(request, ee);
 
 			ee->ctx =