Commit d53f8f8d authored by Joey Gouly, committed by Will Deacon

kselftest/arm64: mte: user_mem: introduce tag_offset and tag_len

These can be used to place an MTE tag at an address that is not at a
page size boundary.

Prior to commit 295cf156 ("arm64: Avoid premature usercopy failure"), the
kernel would loop forever if an MTE tag was placed anywhere other than a
PAGE_SIZE boundary. This happened because the usercopy code checked that the
pages were readable by probing only the first byte of each page, and would
then fault in the middle of a page when it reached the MTE tag.
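
As a minimal sketch (not part of this patch) of what the new parameters make
possible, a tag can now start half-way into a page, using the same selftest
helpers that appear in the diff below; "ptr" is assumed to point at a mapped,
MTE-enabled buffer of at least one page:

	/* Place the tag mid-page rather than at a page boundary, the
	 * case older kernels could not cope with. */
	void *ptr_next = (void *)((char *)ptr + page_sz / 2);
	ptr_next = mte_insert_new_tag(ptr_next);
	mte_set_tag_address_range(ptr_next, page_sz / 2);	/* up to the page end */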
Signed-off-by: Joey Gouly <joey.gouly@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Shuah Khan <shuah@kernel.org>
Reviewed-by: Mark Brown <broonie@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Shuah Khan <skhan@linuxfoundation.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20220209152240.52788-2-joey.gouly@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 39652075
@@ -19,7 +19,8 @@
 static size_t page_sz;

-static int check_usermem_access_fault(int mem_type, int mode, int mapping)
+static int check_usermem_access_fault(int mem_type, int mode, int mapping,
+				      int tag_offset, int tag_len)
 {
 	int fd, i, err;
 	char val = 'A';
@@ -54,10 +55,12 @@ static int check_usermem_access_fault(int mem_type, int mode, int mapping)
 	if (i < len)
 		goto usermem_acc_err;

-	/* Tag the next half of memory with different value */
-	ptr_next = (void *)((unsigned long)ptr + page_sz);
+	if (!tag_len)
+		tag_len = len - tag_offset;
+	/* Tag a part of memory with different value */
+	ptr_next = (void *)((unsigned long)ptr + tag_offset);
 	ptr_next = mte_insert_new_tag(ptr_next);
-	mte_set_tag_address_range(ptr_next, page_sz);
+	mte_set_tag_address_range(ptr_next, tag_len);
 	lseek(fd, 0, 0);

 	/* Copy from file into buffer with invalid tag */
@@ -100,14 +103,14 @@ int main(int argc, char *argv[])
 	/* Set test plan */
 	ksft_set_plan(4);

-	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
+	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, page_sz, 0),
		      "Check memory access from kernel in sync mode, private mapping and mmap memory\n");
-	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
+	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED, page_sz, 0),
		      "Check memory access from kernel in sync mode, shared mapping and mmap memory\n");
-	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE),
+	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE, page_sz, 0),
		      "Check memory access from kernel in async mode, private mapping and mmap memory\n");
-	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED),
+	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED, page_sz, 0),
		      "Check memory access from kernel in async mode, shared mapping and mmap memory\n");

 	mte_restore_setup();
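
All four existing callers pass (page_sz, 0), so they still tag the second
page of the buffer in full and the test's behaviour is unchanged; the new
parameters simply make unaligned placements expressible. As a hypothetical
sketch (not part of this patch), an extra case exercising a tag that starts
just short of a page boundary might look like:

	/* Hypothetical extra case: the tag begins one byte before the
	 * page boundary, so a kernel that probes only the first byte
	 * of each page faults mid-page, as described above. */
	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE,
						 page_sz - 1, 0),
		      "Check unaligned-tag memory access from kernel in sync mode\n");

Any such added case would also need the ksft_set_plan() count bumped to match.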