Commit 8eb4b3b0 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'copy-struct-from-user-v5.4-rc4' of...

Merge tag 'copy-struct-from-user-v5.4-rc4' of gitolite.kernel.org:pub/scm/linux/kernel/git/brauner/linux

Pull usercopy test fixlets from Christian Brauner:
 "This contains two improvements for the copy_struct_from_user() tests:

   - a coding style change to get rid of the ugly "if ((ret |= test()))"
     pointed out when pulling the original patchset.

   - avoid soft lockups when running the usercopy tests on machines
     with large page sizes by scanning only a 1024 byte region"

* tag 'copy-struct-from-user-v5.4-rc4' of gitolite.kernel.org:pub/scm/linux/kernel/git/brauner/linux:
  usercopy: Avoid soft lockups in test_check_nonzero_user()
  lib: test_user_copy: style cleanup
parents 7571438a f418dddf
...@@ -47,18 +47,35 @@ static bool is_zeroed(void *from, size_t size) ...@@ -47,18 +47,35 @@ static bool is_zeroed(void *from, size_t size)
static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size) static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
{ {
int ret = 0; int ret = 0;
size_t start, end, i; size_t start, end, i, zero_start, zero_end;
size_t zero_start = size / 4;
size_t zero_end = size - zero_start; if (test(size < 2 * PAGE_SIZE, "buffer too small"))
return -EINVAL;
/*
* We want to cross a page boundary to exercise the code more
* effectively. We also don't want to make the size we scan too large,
* otherwise the test can take a long time and cause soft lockups. So
* scan a 1024 byte region across the page boundary.
*/
size = 1024;
start = PAGE_SIZE - (size / 2);
kmem += start;
umem += start;
zero_start = size / 4;
zero_end = size - zero_start;
/* /*
* We conduct a series of check_nonzero_user() tests on a block of memory * We conduct a series of check_nonzero_user() tests on a block of
* with the following byte-pattern (trying every possible [start,end] * memory with the following byte-pattern (trying every possible
* pair): * [start,end] pair):
* *
* [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ] * [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
* *
* And we verify that check_nonzero_user() acts identically to memchr_inv(). * And we verify that check_nonzero_user() acts identically to
* memchr_inv().
*/ */
memset(kmem, 0x0, size); memset(kmem, 0x0, size);
...@@ -93,11 +110,13 @@ static int test_copy_struct_from_user(char *kmem, char __user *umem, ...@@ -93,11 +110,13 @@ static int test_copy_struct_from_user(char *kmem, char __user *umem,
size_t ksize, usize; size_t ksize, usize;
umem_src = kmalloc(size, GFP_KERNEL); umem_src = kmalloc(size, GFP_KERNEL);
if ((ret |= test(umem_src == NULL, "kmalloc failed"))) ret = test(umem_src == NULL, "kmalloc failed");
if (ret)
goto out_free; goto out_free;
expected = kmalloc(size, GFP_KERNEL); expected = kmalloc(size, GFP_KERNEL);
if ((ret |= test(expected == NULL, "kmalloc failed"))) ret = test(expected == NULL, "kmalloc failed");
if (ret)
goto out_free; goto out_free;
/* Fill umem with a fixed byte pattern. */ /* Fill umem with a fixed byte pattern. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment