Commit 7dc8f114 authored by Chris Wilson

drm/i915/gem: Drop relocation slowpath

Since the relocations are no longer performed under a global
struct_mutex, or any other lock that is also held by the pagefault
handlers, we can relax and allow our fast path to take a fault. As we no
longer need to abort the fast path for lock avoidance, we no longer need
the slow path handling at all.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200311160310.26711-1-chris@chris-wilson.co.uk
parent 07bcfd12
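The pattern being dropped is the classic atomic-usercopy fast path: while a lock that the pagefault handler also takes is held, the copy must not fault, so any failure punts to a slowpath. A minimal sketch of the before/after control flow, assuming kernel context with <linux/uaccess.h>; the copy_relocs_*() wrappers and their signatures are illustrative stand-ins, not the actual i915 functions:

#include <linux/uaccess.h>

/*
 * Before: a lock also taken by the pagefault handler is held, so
 * servicing a fault here could deadlock. The copy is attempted with
 * faults disabled, and any failure punts to a slowpath.
 */
static int copy_relocs_before(void *dst, const void __user *src,
			      unsigned long len)
{
	unsigned long unwritten;

	pagefault_disable();
	unwritten = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	return unwritten ? -EFAULT : 0; /* -EFAULT: fall back to slowpath */
}

/*
 * After: no such lock is held, so the copy may simply sleep to service
 * the fault. Note that __copy_from_user() still assumes access_ok()
 * has been checked earlier, as eb_relocate_vma() does.
 */
static int copy_relocs_after(void *dst, const void __user *src,
			     unsigned long len)
{
	return __copy_from_user(dst, src, len) ? -EFAULT : 0;
}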
@@ -1440,9 +1440,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
 		 * we would try to acquire the struct mutex again. Obviously
 		 * this is bad and so lockdep complains vehemently.
 		 */
-		pagefault_disable();
-		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
-		pagefault_enable();
+		copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
 		if (unlikely(copied)) {
 			remain = -EFAULT;
 			goto out;
@@ -1492,236 +1490,6 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
 	return remain;
 }
 
-static int
-eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev)
-{
-	const struct drm_i915_gem_exec_object2 *entry = ev->exec;
-	struct drm_i915_gem_relocation_entry *relocs =
-		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
-	unsigned int i;
-	int err;
-
-	for (i = 0; i < entry->relocation_count; i++) {
-		u64 offset = eb_relocate_entry(eb, ev, &relocs[i]);
-
-		if ((s64)offset < 0) {
-			err = (int)offset;
-			goto err;
-		}
-	}
-	err = 0;
-err:
-	reloc_cache_reset(&eb->reloc_cache);
-	return err;
-}
-
-static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
-{
-	const char __user *addr, *end;
-	unsigned long size;
-	char __maybe_unused c;
-
-	size = entry->relocation_count;
-	if (size == 0)
-		return 0;
-
-	if (size > N_RELOC(ULONG_MAX))
-		return -EINVAL;
-
-	addr = u64_to_user_ptr(entry->relocs_ptr);
-	size *= sizeof(struct drm_i915_gem_relocation_entry);
-	if (!access_ok(addr, size))
-		return -EFAULT;
-
-	end = addr + size;
-	for (; addr < end; addr += PAGE_SIZE) {
-		int err = __get_user(c, addr);
-		if (err)
-			return err;
-	}
-	return __get_user(c, end - 1);
-}
-
-static int eb_copy_relocations(const struct i915_execbuffer *eb)
-{
-	struct drm_i915_gem_relocation_entry *relocs;
-	const unsigned int count = eb->buffer_count;
-	unsigned int i;
-	int err;
-
-	for (i = 0; i < count; i++) {
-		const unsigned int nreloc = eb->exec[i].relocation_count;
-		struct drm_i915_gem_relocation_entry __user *urelocs;
-		unsigned long size;
-		unsigned long copied;
-
-		if (nreloc == 0)
-			continue;
-
-		err = check_relocations(&eb->exec[i]);
-		if (err)
-			goto err;
-
-		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
-		size = nreloc * sizeof(*relocs);
-
-		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
-		if (!relocs) {
-			err = -ENOMEM;
-			goto err;
-		}
-
-		/* copy_from_user is limited to < 4GiB */
-		copied = 0;
-		do {
-			unsigned int len =
-				min_t(u64, BIT_ULL(31), size - copied);
-
-			if (__copy_from_user((char *)relocs + copied,
-					     (char __user *)urelocs + copied,
-					     len))
-				goto end;
-
-			copied += len;
-		} while (copied < size);
-
-		/*
-		 * As we do not update the known relocation offsets after
-		 * relocating (due to the complexities in lock handling),
-		 * we need to mark them as invalid now so that we force the
-		 * relocation processing next time. Just in case the target
-		 * object is evicted and then rebound into its old
-		 * presumed_offset before the next execbuffer - if that
-		 * happened we would make the mistake of assuming that the
-		 * relocations were valid.
-		 */
-		if (!user_access_begin(urelocs, size))
-			goto end;
-
-		for (copied = 0; copied < nreloc; copied++)
-			unsafe_put_user(-1,
-					&urelocs[copied].presumed_offset,
-					end_user);
-		user_access_end();
-
-		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
-	}
-
-	return 0;
-
-end_user:
-	user_access_end();
-end:
-	kvfree(relocs);
-	err = -EFAULT;
-err:
-	while (i--) {
-		relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
-		if (eb->exec[i].relocation_count)
-			kvfree(relocs);
-	}
-	return err;
-}
-
-static int eb_prefault_relocations(const struct i915_execbuffer *eb)
-{
-	const unsigned int count = eb->buffer_count;
-	unsigned int i;
-
-	for (i = 0; i < count; i++) {
-		int err;
-
-		err = check_relocations(&eb->exec[i]);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
-{
-	bool have_copy = false;
-	struct eb_vma *ev;
-	int err = 0;
-
-repeat:
-	if (signal_pending(current)) {
-		err = -ERESTARTSYS;
-		goto out;
-	}
-
-	/*
-	 * We take 3 passes through the slowpath.
-	 *
-	 * 1 - we try to just prefault all the user relocation entries and
-	 * then attempt to reuse the atomic pagefault disabled fast path again.
-	 *
-	 * 2 - we copy the user entries to a local buffer here outside of the
-	 * lock and allow ourselves to wait upon any rendering before
-	 * performing relocations.
-	 *
-	 * 3 - we already have a local copy of the relocation entries, but
-	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
-	 */
-	if (!err) {
-		err = eb_prefault_relocations(eb);
-	} else if (!have_copy) {
-		err = eb_copy_relocations(eb);
-		have_copy = err == 0;
-	} else {
-		cond_resched();
-		err = 0;
-	}
-	if (err)
-		goto out;
-
-	list_for_each_entry(ev, &eb->relocs, reloc_link) {
-		if (!have_copy) {
-			pagefault_disable();
-			err = eb_relocate_vma(eb, ev);
-			pagefault_enable();
-			if (err)
-				goto repeat;
-		} else {
-			err = eb_relocate_vma_slow(eb, ev);
-			if (err)
-				goto err;
-		}
-	}
-
-	/*
-	 * Leave the user relocations as are, this is the painfully slow path,
-	 * and we want to avoid the complication of dropping the lock whilst
-	 * having buffers reserved in the aperture and so causing spurious
-	 * ENOSPC for random operations.
-	 */
-
-err:
-	if (err == -EAGAIN)
-		goto repeat;
-
-out:
-	if (have_copy) {
-		const unsigned int count = eb->buffer_count;
-		unsigned int i;
-
-		for (i = 0; i < count; i++) {
-			const struct drm_i915_gem_exec_object2 *entry =
-				&eb->exec[i];
-			struct drm_i915_gem_relocation_entry *relocs;
-
-			if (!entry->relocation_count)
-				continue;
-
-			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
-			kvfree(relocs);
-		}
-	}
-
-	return err;
-}
-
 static int eb_relocate(struct i915_execbuffer *eb)
 {
 	int err;
@@ -1743,8 +1511,9 @@ static int eb_relocate(struct i915_execbuffer *eb)
 		struct eb_vma *ev;
 
 		list_for_each_entry(ev, &eb->relocs, reloc_link) {
-			if (eb_relocate_vma(eb, ev))
-				return eb_relocate_slow(eb);
+			err = eb_relocate_vma(eb, ev);
+			if (err)
+				return err;
 		}
 	}
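One idiom from the removed eb_copy_relocations() is worth keeping in mind: because a single copy_from_user() call is limited to under 4GiB (as the removed comment notes), the relocation array was copied in 2GiB chunks. A generic sketch of that pattern, assuming kernel context; copy_user_chunked() is a hypothetical name, not an i915 function:

#include <linux/bits.h>
#include <linux/minmax.h>
#include <linux/uaccess.h>

/*
 * Copy 'size' bytes from userspace in 2GiB pieces, keeping each
 * individual __copy_from_user() call below the 4GiB ceiling. Assumes
 * access_ok() has already validated the user range.
 */
static int copy_user_chunked(void *dst, const void __user *src, u64 size)
{
	u64 copied = 0;

	while (copied < size) {
		unsigned int len = min_t(u64, BIT_ULL(31), size - copied);

		if (__copy_from_user((char *)dst + copied,
				     (const char __user *)src + copied, len))
			return -EFAULT;

		copied += len;
	}

	return 0;
}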