Commit af85f50d authored by Chris Wilson

drm/i915: Exercise manipulation of single pages in the GGTT

Move a single page of an object around within the GGTT and check
coherency of writes and reads.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170213171558.20942-45-chris@chris-wilson.co.uk
parent 210e8ac4
@@ -903,6 +903,96 @@ static int igt_ggtt_lowlevel(void *arg)
	return exercise_ggtt(arg, lowlevel_hole);
}

static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
					  1024 * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		wmb();
		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
	}

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, 0),
				       offset, I915_CACHE_NONE, 0);

		vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
@@ -1360,6 +1450,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
...
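
The structure of the new subtest is easy to restate in miniature. Below is a purely illustrative userspace model, a sketch rather than the kernel test: none of these names appear in the patch. Here insert_page()/clear_range() are plain helpers standing in for the GGTT PTE updates, and a pointer dereference stands in for the io_mapping_map_atomic_wc() aperture mapping. One backing page is bound at a randomly chosen slot of a 1024-page range, dword n is written through that binding, the binding is cleared, and a second pass in a fresh random order rebinds each slot and checks that the value survived.

/*
 * Illustrative userspace model of igt_ggtt_page -- a sketch under the
 * assumptions above, not the kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_DWORDS 1024		/* PAGE_SIZE / sizeof(u32) on 4K pages */
#define SLOTS	    1024		/* the 1024-page hole grabbed by the test */

static uint32_t backing[PAGE_DWORDS];	/* the single internal-object page */
static uint32_t *aperture[SLOTS];	/* "PTEs": which slot maps the page */

static void insert_page(unsigned int slot) { aperture[slot] = backing; }
static void clear_range(unsigned int slot) { aperture[slot] = NULL; }

/* Fisher-Yates shuffle, standing in for i915_random_order/reorder */
static void shuffle(unsigned int *order, unsigned int count)
{
	for (unsigned int i = count - 1; i > 0; i--) {
		unsigned int j = rand() % (i + 1);
		unsigned int t = order[i];

		order[i] = order[j];
		order[j] = t;
	}
}

int main(void)
{
	unsigned int order[SLOTS];
	unsigned int n;

	for (n = 0; n < SLOTS; n++)
		order[n] = n;

	/* Pass 1: bind the page at a random slot, write dword n, unbind */
	shuffle(order, SLOTS);
	for (n = 0; n < SLOTS; n++) {
		insert_page(order[n]);
		aperture[order[n]][n] = n;
		clear_range(order[n]);
	}

	/* Pass 2: rebind in a different random order, read dword n back */
	shuffle(order, SLOTS);
	for (n = 0; n < SLOTS; n++) {
		insert_page(order[n]);
		if (aperture[order[n]][n] != n) {
			fprintf(stderr, "found %u, expected %u\n",
				aperture[order[n]][n], n);
			return 1;
		}
		clear_range(order[n]);
	}

	puts("all 1024 dwords intact");
	return 0;
}

Note that count == PAGE_SIZE/sizeof(u32) == 1024 in the kernel test, which is why the 1024-page hole provides one distinct GGTT slot per dword. Assuming a kernel built with CONFIG_DRM_I915_SELFTEST, the live suite containing igt_ggtt_page is normally driven at module load (IGT's drv_selftest does this via the i915 selftest module parameters).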