Commit df724ced authored by Matthew Wilcox (Oracle)'s avatar Matthew Wilcox (Oracle) Committed by Andrew Morton

optee: remove vma linked list walk

Use the VMA iterator instead.  Change the calling convention of
__check_mem_type() to pass in the mm instead of the first vma in the
range.

Link: https://lkml.kernel.org/r/20220906194824.2110408-39-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: default avatarLiam R. Howlett <Liam.Howlett@Oracle.com>
Acked-by: default avatarVlastimil Babka <vbabka@suse.cz>
Reviewed-by: default avatarDavidlohr Bueso <dave@stgolabs.net>
Tested-by: default avatarYu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent d9fa0e37
@@ -492,15 +492,18 @@ static bool is_normal_memory(pgprot_t p)
 #endif
 }
 
-static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
+static int __check_mem_type(struct mm_struct *mm, unsigned long start,
+			    unsigned long end)
 {
-	while (vma && is_normal_memory(vma->vm_page_prot)) {
-		if (vma->vm_end >= end)
-			return 0;
-		vma = vma->vm_next;
-	}
+	struct vm_area_struct *vma;
+	VMA_ITERATOR(vmi, mm, start);
 
-	return -EINVAL;
+	for_each_vma_range(vmi, vma, end) {
+		if (!is_normal_memory(vma->vm_page_prot))
+			return -EINVAL;
+	}
+
+	return 0;
 }
 
 int optee_check_mem_type(unsigned long start, size_t num_pages)
int optee_check_mem_type(unsigned long start, size_t num_pages) int optee_check_mem_type(unsigned long start, size_t num_pages)
@@ -516,8 +519,7 @@ int optee_check_mem_type(unsigned long start, size_t num_pages)
 		return 0;
 
 	mmap_read_lock(mm);
-	rc = __check_mem_type(find_vma(mm, start),
-			      start + num_pages * PAGE_SIZE);
+	rc = __check_mem_type(mm, start, start + num_pages * PAGE_SIZE);
 	mmap_read_unlock(mm);
 
 	return rc;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment