Commit 2f29d16c authored by John Hubbard, committed by Andrew Morton

selftests/mm: fix unused variable warnings in hugetlb-madvise.c, migration.c

Dummy variables are required in order to make these two (similar)
routines work, so in both cases, declare the variables as volatile in
order to avoid the clang compiler warning.

Furthermore, in order to ensure that each test actually does what is
intended, add an asm volatile invocation (thanks to David Hildenbrand
for the suggestion), with a clarifying comment so that it survives
future maintenance.

Link: https://lkml.kernel.org/r/20230606071637.267103-3-jhubbard@nvidia.com
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Tested-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9a61100e
@@ -65,11 +65,15 @@ void write_fault_pages(void *addr, unsigned long nr_pages)
 void read_fault_pages(void *addr, unsigned long nr_pages)
 {
-	unsigned long dummy = 0;
+	volatile unsigned long dummy = 0;
 	unsigned long i;

-	for (i = 0; i < nr_pages; i++)
+	for (i = 0; i < nr_pages; i++) {
 		dummy += *((unsigned long *)(addr + (i * huge_page_size)));
+
+		/* Prevent the compiler from optimizing out the entire loop: */
+		asm volatile("" : "+r" (dummy));
+	}
 }
int main(int argc, char **argv) int main(int argc, char **argv)
......
@@ -95,12 +95,15 @@ int migrate(uint64_t *ptr, int n1, int n2)
 void *access_mem(void *ptr)
 {
-	uint64_t y = 0;
+	volatile uint64_t y = 0;
 	volatile uint64_t *x = ptr;

 	while (1) {
 		pthread_testcancel();
 		y += *x;
+
+		/* Prevent the compiler from optimizing out the writes to y: */
+		asm volatile("" : "+r" (y));
 	}

 	return NULL;
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment