Commit be1cb55a authored by Chris Wilson

drm/i915/gt: Keep a no-frills swappable copy of the default context state

We need to keep the default context state (aka the golden render
context) around to instantiate new contexts, and we also keep it pinned
while the engine is active so that we can quickly reset a hanging
context. However, the default contexts are large enough to merit keeping
in swappable memory as opposed to kernel memory, so we store them inside
shmemfs. Currently, we use the normal GEM objects to create the default
context image, but we can throw away all but the shmemfs file.
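
In outline: capture the golden image once into an anonymous shmemfs
file, then treat it as plain file pages. A condensed sketch using the
helpers introduced below (the wrapper function is hypothetical; the
real plumbing lives in __engines_record_defaults(), error handling
trimmed):

#include "shmem_utils.h"

static int record_default_state(struct intel_engine_cs *engine,
                                struct drm_i915_gem_object *ctx_obj)
{
        struct file *state;

        /* Copy the context image into an anonymous shmemfs file;
         * the GEM object can then be freed and the copy swapped out.
         */
        state = shmem_create_from_object(ctx_obj);
        if (IS_ERR(state))
                return PTR_ERR(state);

        engine->default_state = state; /* released with fput() */
        return 0;
}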

This greatly simplifies the tricky power management code, which wants
to run underneath the normal GT locking and definitely does not want to
use any high-level objects that may appear to recurse back into the GT.
Perhaps the primary advantage of the complex GEM object is that we
aggressively cache the mapping, whereas here we recreate the vm_area
every time we unpark. At worst, we can add a lightweight cache, but
first we should find a microbenchmark that is impacted.
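
Purely to illustrate that "lightweight cache" idea (hypothetical, not
part of this patch; shmem_pin_map()/shmem_unpin_map() are the helpers
added below), a small refcounted wrapper would suffice:

#include <linux/mutex.h>

struct cached_map {
        struct mutex lock;
        void *vaddr;
        unsigned int count;
};

static void *cached_pin(struct cached_map *c, struct file *state)
{
        void *ptr;

        mutex_lock(&c->lock);
        if (!c->vaddr)
                c->vaddr = shmem_pin_map(state); /* NULL on failure */
        ptr = c->vaddr;
        if (ptr)
                c->count++; /* only count successful pins */
        mutex_unlock(&c->lock);

        return ptr;
}

static void cached_unpin(struct cached_map *c, struct file *state)
{
        mutex_lock(&c->lock);
        if (!--c->count) {
                shmem_unpin_map(state, c->vaddr);
                c->vaddr = NULL;
        }
        mutex_unlock(&c->lock);
}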

Having started to create some utility functions to make working with
shmemfs objects easier, we can start putting them to wider use, where
GEM objects are overkill, such as storing persistent error state.
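
For instance (a hypothetical use, not in this patch), an error blob
could be parked in swappable shmemfs storage instead of pinned kernel
memory:

static struct file *stash_error_blob(const void *blob, size_t len)
{
        /* shmem_create_from_data() copies the buffer into a fresh
         * shmemfs file that may be swapped out under memory pressure;
         * the name is only a debugging aid.
         */
        return shmem_create_from_data("i915-error-state",
                                      (void *)blob, len);
}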
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Ramalingam C <ramalingam.c@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200429172429.6054-1-chris@chris-wilson.co.uk
parent 8c35a195
@@ -111,6 +111,7 @@ gt-y += \
         gt/intel_sseu.o \
         gt/intel_timeline.o \
         gt/intel_workarounds.o \
+        gt/shmem_utils.o \
         gt/sysfs_engines.o

 # autogenerated null render state
 gt-y += \
...
@@ -834,7 +834,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
         intel_engine_cleanup_cmd_parser(engine);

         if (engine->default_state)
-                i915_gem_object_put(engine->default_state);
+                fput(engine->default_state);

         if (engine->kernel_context) {
                 intel_context_unpin(engine->kernel_context);
...
@@ -15,6 +15,7 @@
 #include "intel_gt_pm.h"
 #include "intel_rc6.h"
 #include "intel_ring.h"
+#include "shmem_utils.h"

 static int __engine_unpark(struct intel_wakeref *wf)
 {
@@ -30,10 +31,8 @@ static int __engine_unpark(struct intel_wakeref *wf)
         /* Pin the default state for fast resets from atomic context. */
         map = NULL;
         if (engine->default_state)
-                map = i915_gem_object_pin_map(engine->default_state,
-                                              I915_MAP_WB);
-        if (!IS_ERR_OR_NULL(map))
-                engine->pinned_default_state = map;
+                map = shmem_pin_map(engine->default_state);
+        engine->pinned_default_state = map;

         /* Discard stale context state from across idling */
         ce = engine->kernel_context;
@@ -264,7 +263,8 @@ static int __engine_park(struct intel_wakeref *wf)
         engine->park(engine);

         if (engine->pinned_default_state) {
-                i915_gem_object_unpin_map(engine->default_state);
+                shmem_unpin_map(engine->default_state,
+                                engine->pinned_default_state);
                 engine->pinned_default_state = NULL;
         }
...
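For context, a rough paraphrase of the consumer (in intel_lrc.c, not
shown in this diff; exact offset arithmetic may differ): keeping the
map pinned while the engine is awake lets the reset path re-seed a hung
context with a plain memcpy, without sleeping:

static void restore_default_state(struct intel_context *ce,
                                  struct intel_engine_cs *engine)
{
        u32 *regs = ce->lrc_reg_state;

        if (engine->pinned_default_state)
                memcpy(regs, /* pages pinned: safe in atomic context */
                       engine->pinned_default_state + LRC_STATE_OFFSET,
                       engine->context_size - PAGE_SIZE);
}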
@@ -339,7 +339,7 @@ struct intel_engine_cs {
         unsigned long wakeref_serial;
         struct intel_wakeref wakeref;

-        struct drm_i915_gem_object *default_state;
+        struct file *default_state;
         void *pinned_default_state;

         struct {
...
@@ -16,6 +16,7 @@
 #include "intel_rps.h"
 #include "intel_uncore.h"
 #include "intel_pm.h"
+#include "shmem_utils.h"

 void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 {
@@ -371,18 +372,6 @@ static struct i915_address_space *kernel_vm(struct intel_gt *gt)
         return i915_vm_get(&gt->ggtt->vm);
 }

-static int __intel_context_flush_retire(struct intel_context *ce)
-{
-        struct intel_timeline *tl;
-
-        tl = intel_context_timeline_lock(ce);
-        if (IS_ERR(tl))
-                return PTR_ERR(tl);
-
-        intel_context_timeline_unlock(tl);
-        return 0;
-}
-
 static int __engines_record_defaults(struct intel_gt *gt)
 {
         struct i915_request *requests[I915_NUM_ENGINES] = {};
@@ -448,8 +437,7 @@ static int __engines_record_defaults(struct intel_gt *gt)

         for (id = 0; id < ARRAY_SIZE(requests); id++) {
                 struct i915_request *rq;
-                struct i915_vma *state;
-                void *vaddr;
+                struct file *state;

                 rq = requests[id];
                 if (!rq)
@@ -461,48 +449,16 @@ static int __engines_record_defaults(struct intel_gt *gt)
                 }

                 GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
-                state = rq->context->state;
-                if (!state)
+                if (!rq->context->state)
                         continue;

-                /* Serialise with retirement on another CPU */
-                GEM_BUG_ON(!i915_request_completed(rq));
-                err = __intel_context_flush_retire(rq->context);
-                if (err)
-                        goto out;
-
-                /* We want to be able to unbind the state from the GGTT */
-                GEM_BUG_ON(intel_context_is_pinned(rq->context));
-
-                /*
-                 * As we will hold a reference to the logical state, it will
-                 * not be torn down with the context, and importantly the
-                 * object will hold onto its vma (making it possible for a
-                 * stray GTT write to corrupt our defaults). Unmap the vma
-                 * from the GTT to prevent such accidents and reclaim the
-                 * space.
-                 */
-                err = i915_vma_unbind(state);
-                if (err)
-                        goto out;
-
-                i915_gem_object_lock(state->obj);
-                err = i915_gem_object_set_to_cpu_domain(state->obj, false);
-                i915_gem_object_unlock(state->obj);
-                if (err)
-                        goto out;
-
-                i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
-
-                /* Check we can acquire the image of the context state */
-                vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
-                if (IS_ERR(vaddr)) {
-                        err = PTR_ERR(vaddr);
+                /* Keep a copy of the state's backing pages; free the obj */
+                state = shmem_create_from_object(rq->context->state->obj);
+                if (IS_ERR(state)) {
+                        err = PTR_ERR(state);
                         goto out;
                 }

-                rq->engine->default_state = i915_gem_object_get(state->obj);
-                i915_gem_object_unpin_map(state->obj);
+                rq->engine->default_state = state;
         }

 out:
...
@@ -147,6 +147,7 @@
 #include "intel_reset.h"
 #include "intel_ring.h"
 #include "intel_workarounds.h"
+#include "shmem_utils.h"

 #define RING_EXECLIST_QFULL        (1 << 0x2)
 #define RING_EXECLIST1_VALID       (1 << 0x3)
@@ -5083,30 +5084,18 @@ populate_lr_context(struct intel_context *ce,
 {
         bool inhibit = true;
         void *vaddr;
-        int ret;

         vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
         if (IS_ERR(vaddr)) {
-                ret = PTR_ERR(vaddr);
-                drm_dbg(&engine->i915->drm,
-                        "Could not map object pages! (%d)\n", ret);
-                return ret;
+                drm_dbg(&engine->i915->drm, "Could not map object pages!\n");
+                return PTR_ERR(vaddr);
         }

         set_redzone(vaddr, engine);

         if (engine->default_state) {
-                void *defaults;
-
-                defaults = i915_gem_object_pin_map(engine->default_state,
-                                                   I915_MAP_WB);
-                if (IS_ERR(defaults)) {
-                        ret = PTR_ERR(defaults);
-                        goto err_unpin_ctx;
-                }
-
-                memcpy(vaddr, defaults, engine->context_size);
-                i915_gem_object_unpin_map(engine->default_state);
+                shmem_read(engine->default_state, 0,
+                           vaddr, engine->context_size);
                 __set_bit(CONTEXT_VALID_BIT, &ce->flags);
                 inhibit = false;
         }
@@ -5121,11 +5110,9 @@ populate_lr_context(struct intel_context *ce,
         execlists_init_reg_state(vaddr + LRC_STATE_OFFSET,
                                  ce, engine, ring, inhibit);

-        ret = 0;
-err_unpin_ctx:
         __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
         i915_gem_object_unpin_map(ctx_obj);
-        return ret;
+        return 0;
 }

 static int __execlists_context_alloc(struct intel_context *ce,
...
@@ -42,6 +42,7 @@
 #include "intel_reset.h"
 #include "intel_ring.h"
 #include "intel_workarounds.h"
+#include "shmem_utils.h"

 /* Rough estimate of the typical request size, performing a flush,
  * set-context and then emitting the batch.
@@ -1241,7 +1242,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
                 i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

         if (engine->default_state) {
-                void *defaults, *vaddr;
+                void *vaddr;

                 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
                 if (IS_ERR(vaddr)) {
@@ -1249,15 +1250,8 @@ alloc_context_vma(struct intel_engine_cs *engine)
                         goto err_obj;
                 }

-                defaults = i915_gem_object_pin_map(engine->default_state,
-                                                   I915_MAP_WB);
-                if (IS_ERR(defaults)) {
-                        err = PTR_ERR(defaults);
-                        goto err_map;
-                }
-
-                memcpy(vaddr, defaults, engine->context_size);
-                i915_gem_object_unpin_map(engine->default_state);
+                shmem_read(engine->default_state, 0,
+                           vaddr, engine->context_size);

                 i915_gem_object_flush_map(obj);
                 i915_gem_object_unpin_map(obj);
@@ -1271,8 +1265,6 @@ alloc_context_vma(struct intel_engine_cs *engine)

         return vma;

-err_map:
-        i915_gem_object_unpin_map(obj);
 err_obj:
         i915_gem_object_put(obj);
         return ERR_PTR(err);
...
@@ -155,7 +155,7 @@ static int live_context_size(void *arg)
         for_each_engine(engine, gt, id) {
                 struct {
-                        struct drm_i915_gem_object *state;
+                        struct file *state;
                         void *pinned;
                 } saved;
...
@@ -4452,8 +4452,7 @@ static int live_lrc_layout(void *arg)
                 if (!engine->default_state)
                         continue;

-                hw = i915_gem_object_pin_map(engine->default_state,
-                                             I915_MAP_WB);
+                hw = shmem_pin_map(engine->default_state);
                 if (IS_ERR(hw)) {
                         err = PTR_ERR(hw);
                         break;
@@ -4525,7 +4524,7 @@ static int live_lrc_layout(void *arg)
                         hexdump(lrc, PAGE_SIZE);
                 }

-                i915_gem_object_unpin_map(engine->default_state);
+                shmem_unpin_map(engine->default_state, hw);
                 if (err)
                         break;
         }
@@ -4630,8 +4629,7 @@ static int live_lrc_fixed(void *arg)
                 if (!engine->default_state)
                         continue;

-                hw = i915_gem_object_pin_map(engine->default_state,
-                                             I915_MAP_WB);
+                hw = shmem_pin_map(engine->default_state);
                 if (IS_ERR(hw)) {
                         err = PTR_ERR(hw);
                         break;
@@ -4652,7 +4650,7 @@ static int live_lrc_fixed(void *arg)
                         }
                 }

-                i915_gem_object_unpin_map(engine->default_state);
+                shmem_unpin_map(engine->default_state, hw);
         }

         return err;
...
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

#include "gem/i915_gem_object.h"
#include "shmem_utils.h"

struct file *shmem_create_from_data(const char *name, void *data, size_t len)
{
        struct file *file;
        int err;

        file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
        if (IS_ERR(file))
                return file;

        err = shmem_write(file, 0, data, len);
        if (err) {
                fput(file);
                return ERR_PTR(err);
        }

        return file;
}

struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
        struct file *file;
        void *ptr;

        if (obj->ops == &i915_gem_shmem_ops) {
                file = obj->base.filp;
                atomic_long_inc(&file->f_count);
                return file;
        }

        ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(ptr))
                return ERR_CAST(ptr);

        file = shmem_create_from_data("", ptr, obj->base.size);
        i915_gem_object_unpin_map(obj);

        return file;
}

static size_t shmem_npte(struct file *file)
{
        return file->f_mapping->host->i_size >> PAGE_SHIFT;
}

static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
{
        unsigned long pfn;

        vunmap(ptr);

        for (pfn = 0; pfn < n_pte; pfn++) {
                struct page *page;

                page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
                                                   GFP_KERNEL);
                if (!WARN_ON(IS_ERR(page))) {
                        put_page(page);
                        put_page(page);
                }
        }
}

void *shmem_pin_map(struct file *file)
{
        const size_t n_pte = shmem_npte(file);
        pte_t *stack[32], **ptes, **mem;
        struct vm_struct *area;
        unsigned long pfn;

        mem = stack;
        if (n_pte > ARRAY_SIZE(stack)) {
                mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
                if (!mem)
                        return NULL;
        }

        area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
        if (!area) {
                if (mem != stack)
                        kvfree(mem);
                return NULL;
        }

        ptes = mem;
        for (pfn = 0; pfn < n_pte; pfn++) {
                struct page *page;

                page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
                                                   GFP_KERNEL);
                if (IS_ERR(page))
                        goto err_page;

                **ptes++ = mk_pte(page, PAGE_KERNEL);
        }

        if (mem != stack)
                kvfree(mem);

        mapping_set_unevictable(file->f_mapping);
        return area->addr;

err_page:
        if (mem != stack)
                kvfree(mem);

        __shmem_unpin_map(file, area->addr, pfn);
        return NULL;
}

void shmem_unpin_map(struct file *file, void *ptr)
{
        mapping_clear_unevictable(file->f_mapping);
        __shmem_unpin_map(file, ptr, shmem_npte(file));
}

static int __shmem_rw(struct file *file, loff_t off,
                      void *ptr, size_t len,
                      bool write)
{
        unsigned long pfn;

        for (pfn = off >> PAGE_SHIFT; len; pfn++) {
                unsigned int this =
                        min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
                struct page *page;
                void *vaddr;

                page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
                                                   GFP_KERNEL);
                if (IS_ERR(page))
                        return PTR_ERR(page);

                vaddr = kmap(page);
                if (write)
                        memcpy(vaddr + offset_in_page(off), ptr, this);
                else
                        memcpy(ptr, vaddr + offset_in_page(off), this);
                kunmap(page);
                put_page(page);

                len -= this;
                ptr += this;
                off = 0;
        }

        return 0;
}

int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
        return __shmem_rw(file, off, dst, len, false);
}

int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
        return __shmem_rw(file, off, src, len, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "st_shmem_utils.c"
#endif
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 */

#ifndef SHMEM_UTILS_H
#define SHMEM_UTILS_H

#include <linux/types.h>

struct drm_i915_gem_object;
struct file;

struct file *shmem_create_from_data(const char *name, void *data, size_t len);
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj);

void *shmem_pin_map(struct file *file);
void shmem_unpin_map(struct file *file, void *ptr);

int shmem_read(struct file *file, loff_t off, void *dst, size_t len);
int shmem_write(struct file *file, loff_t off, void *src, size_t len);

#endif /* SHMEM_UTILS_H */
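
Note the mixed error conventions above: the create helpers return
ERR_PTR() on failure, while shmem_pin_map() returns NULL. A minimal
(hypothetical) round trip through the API:

static int shmem_roundtrip_example(void *buf, size_t len)
{
        struct file *state;
        void *map;

        state = shmem_create_from_data("example", buf, len);
        if (IS_ERR(state))
                return PTR_ERR(state);

        map = shmem_pin_map(state);
        if (!map) { /* NULL, not ERR_PTR(), on failure */
                fput(state);
                return -ENOMEM;
        }

        /* ... use the contiguous kernel mapping ... */

        shmem_unpin_map(state, map);
        fput(state);
        return 0;
}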
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

/* Just a quick and casual check of the shmem_utils API */

static int igt_shmem_basic(void *ignored)
{
        u32 datum = 0xdeadbeef, result;
        struct file *file;
        u32 *map;
        int err;

        file = shmem_create_from_data("mock", &datum, sizeof(datum));
        if (IS_ERR(file))
                return PTR_ERR(file);

        result = 0;
        err = shmem_read(file, 0, &result, sizeof(result));
        if (err)
                goto out_file;

        if (result != datum) {
                pr_err("Incorrect read back from shmemfs: %x != %x\n",
                       result, datum);
                err = -EINVAL;
                goto out_file;
        }

        result = 0xc0ffee;
        err = shmem_write(file, 0, &result, sizeof(result));
        if (err)
                goto out_file;

        map = shmem_pin_map(file);
        if (!map) {
                err = -ENOMEM;
                goto out_file;
        }

        if (*map != result) {
                pr_err("Incorrect read back via mmap of last write: %x != %x\n",
                       *map, result);
                err = -EINVAL;
                goto out_map;
        }

out_map:
        shmem_unpin_map(file, map);
out_file:
        fput(file);
        return err;
}

int shmem_utils_mock_selftests(void)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_shmem_basic),
        };

        return i915_subtests(tests, NULL);
}
@@ -1320,26 +1320,6 @@ capture_user(struct intel_engine_capture_vma *capture,
         return capture;
 }

-static struct i915_vma_coredump *
-capture_object(const struct intel_gt *gt,
-               struct drm_i915_gem_object *obj,
-               const char *name,
-               struct i915_vma_compress *compress)
-{
-        if (obj && i915_gem_object_has_pages(obj)) {
-                struct i915_vma fake = {
-                        .node = { .start = U64_MAX, .size = obj->base.size },
-                        .size = obj->base.size,
-                        .pages = obj->mm.pages,
-                        .obj = obj,
-                };
-
-                return i915_vma_coredump_create(gt, &fake, name, compress);
-        } else {
-                return NULL;
-        }
-}
-
 static void add_vma(struct intel_engine_coredump *ee,
                     struct i915_vma_coredump *vma)
 {
@@ -1428,12 +1408,6 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
                                engine->wa_ctx.vma,
                                "WA context",
                                compress));
-
-        add_vma(ee,
-                capture_object(engine->gt,
-                               engine->default_state,
-                               "NULL context",
-                               compress));
 }

 static struct intel_engine_coredump *
...
@@ -16,6 +16,7 @@
  * Tests are executed in order by igt/drv_selftest
  */
 selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
+selftest(shmem, shmem_utils_mock_selftests)
 selftest(fence, i915_sw_fence_mock_selftests)
 selftest(scatterlist, scatterlist_mock_selftests)
 selftest(syncmap, i915_syncmap_mock_selftests)
...