Commit 52f52858 authored by Jason Gunthorpe

iommufd: Add additional invariant assertions

These are on performance paths so we protect them using the
CONFIG_IOMMUFD_TEST to not take a hit during normal operation.

These are useful when running the test suite and syzkaller to find data
structure inconsistencies early.

Link: https://lore.kernel.org/r/18-v6-a196d26f289e+11787-iommufd_jgg@nvidia.com
Tested-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com> # s390
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent e26eed4f
...@@ -625,6 +625,11 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova, ...@@ -625,6 +625,11 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
struct iopt_area *area; struct iopt_area *area;
int rc; int rc;
/* Driver's ops don't support pin_pages */
if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
WARN_ON(access->iova_alignment != PAGE_SIZE || !access->ops->unmap))
return -EINVAL;
if (!length) if (!length)
return -EINVAL; return -EINVAL;
if (check_add_overflow(iova, length - 1, &last_iova)) if (check_add_overflow(iova, length - 1, &last_iova))
......
...@@ -251,6 +251,11 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt, ...@@ -251,6 +251,11 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt,
(uintptr_t)elm->pages->uptr + elm->start_byte, length); (uintptr_t)elm->pages->uptr + elm->start_byte, length);
if (rc) if (rc)
goto out_unlock; goto out_unlock;
if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
WARN_ON(iopt_check_iova(iopt, *dst_iova, length))) {
rc = -EINVAL;
goto out_unlock;
}
} else { } else {
rc = iopt_check_iova(iopt, *dst_iova, length); rc = iopt_check_iova(iopt, *dst_iova, length);
if (rc) if (rc)
...@@ -277,6 +282,8 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt, ...@@ -277,6 +282,8 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt,
static void iopt_abort_area(struct iopt_area *area) static void iopt_abort_area(struct iopt_area *area)
{ {
if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
WARN_ON(area->pages);
if (area->iopt) { if (area->iopt) {
down_write(&area->iopt->iova_rwsem); down_write(&area->iopt->iova_rwsem);
interval_tree_remove(&area->node, &area->iopt->area_itree); interval_tree_remove(&area->node, &area->iopt->area_itree);
...@@ -642,6 +649,9 @@ void iopt_destroy_table(struct io_pagetable *iopt) ...@@ -642,6 +649,9 @@ void iopt_destroy_table(struct io_pagetable *iopt)
{ {
struct interval_tree_node *node; struct interval_tree_node *node;
if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
iopt_remove_reserved_iova(iopt, NULL);
while ((node = interval_tree_iter_first(&iopt->allowed_itree, 0, while ((node = interval_tree_iter_first(&iopt->allowed_itree, 0,
ULONG_MAX))) { ULONG_MAX))) {
interval_tree_remove(node, &iopt->allowed_itree); interval_tree_remove(node, &iopt->allowed_itree);
...@@ -688,6 +698,8 @@ static void iopt_unfill_domain(struct io_pagetable *iopt, ...@@ -688,6 +698,8 @@ static void iopt_unfill_domain(struct io_pagetable *iopt,
continue; continue;
mutex_lock(&pages->mutex); mutex_lock(&pages->mutex);
if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
WARN_ON(!area->storage_domain);
if (area->storage_domain == domain) if (area->storage_domain == domain)
area->storage_domain = storage_domain; area->storage_domain = storage_domain;
mutex_unlock(&pages->mutex); mutex_unlock(&pages->mutex);
...@@ -792,6 +804,16 @@ static int iopt_check_iova_alignment(struct io_pagetable *iopt, ...@@ -792,6 +804,16 @@ static int iopt_check_iova_alignment(struct io_pagetable *iopt,
(iopt_area_length(area) & align_mask) || (iopt_area_length(area) & align_mask) ||
(area->page_offset & align_mask)) (area->page_offset & align_mask))
return -EADDRINUSE; return -EADDRINUSE;
if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) {
struct iommufd_access *access;
unsigned long index;
xa_for_each(&iopt->access_list, index, access)
if (WARN_ON(access->iova_alignment >
new_iova_alignment))
return -EADDRINUSE;
}
return 0; return 0;
} }
......
...@@ -101,6 +101,9 @@ static inline size_t iopt_area_length(struct iopt_area *area) ...@@ -101,6 +101,9 @@ static inline size_t iopt_area_length(struct iopt_area *area)
static inline unsigned long iopt_area_start_byte(struct iopt_area *area, static inline unsigned long iopt_area_start_byte(struct iopt_area *area,
unsigned long iova) unsigned long iova)
{ {
if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
WARN_ON(iova < iopt_area_iova(area) ||
iova > iopt_area_last_iova(area));
return (iova - iopt_area_iova(area)) + area->page_offset + return (iova - iopt_area_iova(area)) + area->page_offset +
iopt_area_index(area) * PAGE_SIZE; iopt_area_index(area) * PAGE_SIZE;
} }
......
...@@ -162,12 +162,20 @@ void interval_tree_double_span_iter_next( ...@@ -162,12 +162,20 @@ void interval_tree_double_span_iter_next(
/*
 * Account @npages more pinned pages against @pages.
 *
 * The addition is done with check_add_overflow() so the counter update and
 * the overflow detection happen in one step.  Under CONFIG_IOMMUFD_TEST the
 * result is sanity-checked: an overflow, or a pinned count exceeding the
 * total number of pages, indicates data-structure corruption and triggers a
 * WARN.  In normal builds the check is compiled out to keep this hot path
 * cheap.
 */
static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages)
{
	int rc;

	rc = check_add_overflow(pages->npinned, npages, &pages->npinned);
	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
		WARN_ON(rc || pages->npinned > pages->npages);
}
/*
 * Release accounting for @npages previously pinned pages of @pages.
 *
 * Mirrors iopt_pages_add_npinned(): check_sub_overflow() performs the
 * decrement and reports underflow in one step.  Under CONFIG_IOMMUFD_TEST an
 * underflow, or a pinned count above the page total, WARNs to surface
 * inconsistencies early; normal builds skip the check on this fast path.
 */
static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages)
{
	int rc;

	rc = check_sub_overflow(pages->npinned, npages, &pages->npinned);
	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
		WARN_ON(rc || pages->npinned > pages->npages);
}
static void iopt_pages_err_unpin(struct iopt_pages *pages, static void iopt_pages_err_unpin(struct iopt_pages *pages,
...@@ -189,6 +197,9 @@ static void iopt_pages_err_unpin(struct iopt_pages *pages, ...@@ -189,6 +197,9 @@ static void iopt_pages_err_unpin(struct iopt_pages *pages,
static unsigned long iopt_area_index_to_iova(struct iopt_area *area, static unsigned long iopt_area_index_to_iova(struct iopt_area *area,
unsigned long index) unsigned long index)
{ {
if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
WARN_ON(index < iopt_area_index(area) ||
index > iopt_area_last_index(area));
index -= iopt_area_index(area); index -= iopt_area_index(area);
if (index == 0) if (index == 0)
return iopt_area_iova(area); return iopt_area_iova(area);
...@@ -198,6 +209,9 @@ static unsigned long iopt_area_index_to_iova(struct iopt_area *area, ...@@ -198,6 +209,9 @@ static unsigned long iopt_area_index_to_iova(struct iopt_area *area,
static unsigned long iopt_area_index_to_iova_last(struct iopt_area *area, static unsigned long iopt_area_index_to_iova_last(struct iopt_area *area,
unsigned long index) unsigned long index)
{ {
if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
WARN_ON(index < iopt_area_index(area) ||
index > iopt_area_last_index(area));
if (index == iopt_area_last_index(area)) if (index == iopt_area_last_index(area))
return iopt_area_last_iova(area); return iopt_area_last_iova(area);
return iopt_area_iova(area) - area->page_offset + return iopt_area_iova(area) - area->page_offset +
...@@ -286,6 +300,8 @@ static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns) ...@@ -286,6 +300,8 @@ static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns)
{ {
if (!batch->total_pfns) if (!batch->total_pfns)
return; return;
if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
WARN_ON(batch->total_pfns != batch->npfns[0]);
skip_pfns = min(batch->total_pfns, skip_pfns); skip_pfns = min(batch->total_pfns, skip_pfns);
batch->pfns[0] += skip_pfns; batch->pfns[0] += skip_pfns;
batch->npfns[0] -= skip_pfns; batch->npfns[0] -= skip_pfns;
...@@ -301,6 +317,8 @@ static int __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup, ...@@ -301,6 +317,8 @@ static int __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup,
batch->pfns = temp_kmalloc(&size, backup, backup_len); batch->pfns = temp_kmalloc(&size, backup, backup_len);
if (!batch->pfns) if (!batch->pfns)
return -ENOMEM; return -ENOMEM;
if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(size < elmsz))
return -EINVAL;
batch->array_size = size / elmsz; batch->array_size = size / elmsz;
batch->npfns = (u32 *)(batch->pfns + batch->array_size); batch->npfns = (u32 *)(batch->pfns + batch->array_size);
batch_clear(batch); batch_clear(batch);
...@@ -429,6 +447,10 @@ static int batch_iommu_map_small(struct iommu_domain *domain, ...@@ -429,6 +447,10 @@ static int batch_iommu_map_small(struct iommu_domain *domain,
unsigned long start_iova = iova; unsigned long start_iova = iova;
int rc; int rc;
if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
WARN_ON(paddr % PAGE_SIZE || iova % PAGE_SIZE ||
size % PAGE_SIZE);
while (size) { while (size) {
rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot); rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
if (rc) if (rc)
...@@ -718,6 +740,10 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user, ...@@ -718,6 +740,10 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user,
uintptr_t uptr; uintptr_t uptr;
long rc; long rc;
if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
WARN_ON(last_index < start_index))
return -EINVAL;
if (!user->upages) { if (!user->upages) {
/* All undone in pfn_reader_destroy() */ /* All undone in pfn_reader_destroy() */
user->upages_len = user->upages_len =
...@@ -956,6 +982,10 @@ static int pfn_reader_fill_span(struct pfn_reader *pfns) ...@@ -956,6 +982,10 @@ static int pfn_reader_fill_span(struct pfn_reader *pfns)
struct iopt_area *area; struct iopt_area *area;
int rc; int rc;
if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
WARN_ON(span->last_used < start_index))
return -EINVAL;
if (span->is_used == 1) { if (span->is_used == 1) {
batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns, batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns,
start_index, span->last_used); start_index, span->last_used);
...@@ -1008,6 +1038,10 @@ static int pfn_reader_next(struct pfn_reader *pfns) ...@@ -1008,6 +1038,10 @@ static int pfn_reader_next(struct pfn_reader *pfns)
while (pfns->batch_end_index != pfns->last_index + 1) { while (pfns->batch_end_index != pfns->last_index + 1) {
unsigned int npfns = pfns->batch.total_pfns; unsigned int npfns = pfns->batch.total_pfns;
if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
WARN_ON(interval_tree_double_span_iter_done(&pfns->span)))
return -EINVAL;
rc = pfn_reader_fill_span(pfns); rc = pfn_reader_fill_span(pfns);
if (rc) if (rc)
return rc; return rc;
...@@ -1091,6 +1125,10 @@ static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages, ...@@ -1091,6 +1125,10 @@ static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages,
{ {
int rc; int rc;
if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
WARN_ON(last_index < start_index))
return -EINVAL;
rc = pfn_reader_init(pfns, pages, start_index, last_index); rc = pfn_reader_init(pfns, pages, start_index, last_index);
if (rc) if (rc)
return rc; return rc;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment