Commit e26eed4f authored by Jason Gunthorpe

iommufd: Add some fault injection points

This increases the coverage the fail_nth test gets, as well as the coverage
reached via syzkaller.

Link: https://lore.kernel.org/r/17-v6-a196d26f289e+11787-iommufd_jgg@nvidia.com
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com> # s390
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent f4b20bb3
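The hunks below gate several hot paths on iommufd_should_fail(), whose
definition is not part of this excerpt. As orientation only, here is a minimal
sketch of how such a hook is typically built on the kernel's fault-injection
framework; the attribute name fail_iommufd and the debugfs wiring are
illustrative assumptions, not necessarily what this series does:

/*
 * Sketch, not the commit's code: a driver-local fault hook built on
 * <linux/fault-inject.h>. should_fail() consults the usual knobs
 * (probability, interval, times) plus the per-task fail-nth counter.
 */
#include <linux/fault-inject.h>
#include <linux/init.h>

static DECLARE_FAULT_ATTR(fail_iommufd); /* illustrative name */

bool iommufd_should_fail(void)
{
	return should_fail(&fail_iommufd, 1);
}

static int __init iommufd_fail_init(void)
{
	/* exposes /sys/kernel/debug/fail_iommufd/* when debugfs is enabled */
	fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
	return 0;
}
late_initcall(iommufd_fail_init);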
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -102,6 +102,9 @@ struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
 {
 	struct iommufd_object *obj;
 
+	if (iommufd_should_fail())
+		return ERR_PTR(-ENOENT);
+
 	xa_lock(&ictx->objects);
 	obj = xa_load(&ictx->objects, id);
 	if (!obj || (type != IOMMUFD_OBJ_ANY && obj->type != type) ||
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -80,6 +80,10 @@ static void *temp_kmalloc(size_t *size, void *backup, size_t backup_len)
 
 	if (*size < backup_len)
 		return backup;
+
+	if (!backup && iommufd_should_fail())
+		return NULL;
+
 	*size = min_t(size_t, *size, TEMP_MEMORY_LIMIT);
 	res = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
 	if (res)
@@ -544,6 +548,7 @@ static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
 			   unsigned long last_index, struct page **pages)
 {
 	struct page **end_pages = pages + (last_index - start_index) + 1;
+	struct page **half_pages = pages + (end_pages - pages) / 2;
 	XA_STATE(xas, xa, start_index);
 
 	do {
@@ -551,6 +556,15 @@ static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
 
 		xas_lock(&xas);
 		while (pages != end_pages) {
+			/* xarray does not participate in fault injection */
+			if (pages == half_pages && iommufd_should_fail()) {
+				xas_set_err(&xas, -EINVAL);
+				xas_unlock(&xas);
+				/* aka xas_destroy() */
+				xas_nomem(&xas, GFP_KERNEL);
+				goto err_clear;
+			}
+
 			old = xas_store(&xas, xa_mk_value(page_to_pfn(*pages)));
 			if (xas_error(&xas))
 				break;
@@ -561,6 +575,7 @@ static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
 		xas_unlock(&xas);
 	} while (xas_nomem(&xas, GFP_KERNEL));
 
+err_clear:
 	if (xas_error(&xas)) {
 		if (xas.xa_index != start_index)
 			clear_xarray(xa, start_index, xas.xa_index - 1);
@@ -728,6 +743,10 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user,
 
 	npages = min_t(unsigned long, last_index - start_index + 1,
 		       user->upages_len / sizeof(*user->upages));
+
+	if (iommufd_should_fail())
+		return -EFAULT;
+
 	uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE);
 	if (!remote_mm)
 		rc = pin_user_pages_fast(uptr, npages, user->gup_flags,
@@ -872,6 +891,8 @@ static int pfn_reader_user_update_pinned(struct pfn_reader_user *user,
 		npages = pages->last_npinned - pages->npinned;
 		inc = false;
 	} else {
+		if (iommufd_should_fail())
+			return -ENOMEM;
 		npages = pages->npinned - pages->last_npinned;
 		inc = true;
 	}
@@ -1721,6 +1742,11 @@ static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index,
 		return iopt_pages_rw_slow(pages, index, index, offset, data,
 					  length, flags);
 
+	if (iommufd_should_fail()) {
+		rc = -EINVAL;
+		goto out_mmput;
+	}
+
 	mmap_read_lock(pages->source_mm);
 	rc = pin_user_pages_remote(
 		pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE),
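The fail_nth coverage mentioned in the commit message comes from systematic
fault injection: userspace arms a per-task counter so that the Nth
should_fail() call made during the next syscall returns true, which now trips
the points added above. A hedged userspace sketch of one probe iteration
follows; the /proc/thread-self/fail-nth path, CONFIG_FAULT_INJECTION, and the
IOMMU_DESTROY ioctl from <linux/iommufd.h> are assumptions about the test
environment, not part of this commit:

/*
 * Sketch of a single fail-nth probe: arm the counter, issue one
 * iommufd ioctl, and report the errno. With a bogus object id the
 * ioctl fails with ENOENT either way; the point is only that the
 * armed should_fail() hit is consumed inside the syscall.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/iommufd.h>

int main(void)
{
	struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
	int fd = open("/dev/iommu", O_RDWR);
	int nth = open("/proc/thread-self/fail-nth", O_WRONLY);

	if (fd < 0 || nth < 0)
		return 1;

	write(nth, "1", 1);	/* fail the 1st injection point hit */
	if (ioctl(fd, IOMMU_DESTROY, &cmd))
		printf("ioctl failed with errno %d\n", errno);

	close(nth);
	close(fd);
	return 0;
}

The fail_nth selftest referenced in the commit message iterates N upward until
a syscall sequence completes without consuming the armed failure, so each
injection point on the path gets exercised once.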