Commit fc4d1823 authored by Zi Yan's avatar Zi Yan Committed by Andrew Morton

mm: huge_memory: enable debugfs to split huge pages to any order

This is used to test split_huge_page_to_list_to_order() for pagecache THPs.
Also add test cases for split_huge_page_to_list_to_order() via debugfs.

[ziy@nvidia.com: fix issue discovered with NFS]
  Link: https://lkml.kernel.org/r/262E4DAA-4A78-4328-B745-1355AE356A07@nvidia.com
Link: https://lkml.kernel.org/r/20240226205534.1603748-9-zi.yan@sent.com
Signed-off-by: default avatarZi Yan <ziy@nvidia.com>
Tested-by: default avatarAishwarya TCV <aishwarya.tcv@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Michal Koutny <mkoutny@suse.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zach O'Keefe <zokeefe@google.com>
Cc: Aishwarya TCV <aishwarya.tcv@arm.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent c010d47f
......@@ -3419,7 +3419,7 @@ static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
}
static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
unsigned long vaddr_end)
unsigned long vaddr_end, unsigned int new_order)
{
int ret = 0;
struct task_struct *task;
......@@ -3483,13 +3483,19 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
goto next;
total++;
if (!can_split_folio(folio, NULL))
/*
* For folios with private, split_huge_page_to_list_to_order()
* will try to drop it before split and then check if the folio
* can be split or not. So skip the check here.
*/
if (!folio_test_private(folio) &&
!can_split_folio(folio, NULL))
goto next;
if (!folio_trylock(folio))
goto next;
if (!split_folio(folio))
if (!split_folio_to_order(folio, new_order))
split++;
folio_unlock(folio);
......@@ -3507,7 +3513,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
}
static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
pgoff_t off_end)
pgoff_t off_end, unsigned int new_order)
{
struct filename *file;
struct file *candidate;
......@@ -3546,7 +3552,7 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
if (!folio_trylock(folio))
goto next;
if (!split_folio(folio))
if (!split_folio_to_order(folio, new_order))
split++;
folio_unlock(folio);
......@@ -3571,10 +3577,14 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
{
static DEFINE_MUTEX(split_debug_mutex);
ssize_t ret;
/* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
/*
* hold pid, start_vaddr, end_vaddr, new_order or
* file_path, off_start, off_end, new_order
*/
char input_buf[MAX_INPUT_BUF_SZ];
int pid;
unsigned long vaddr_start, vaddr_end;
unsigned int new_order = 0;
ret = mutex_lock_interruptible(&split_debug_mutex);
if (ret)
......@@ -3603,29 +3613,29 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
goto out;
}
ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
if (ret != 2) {
ret = sscanf(buf, "0x%lx,0x%lx,%d", &off_start, &off_end, &new_order);
if (ret != 2 && ret != 3) {
ret = -EINVAL;
goto out;
}
ret = split_huge_pages_in_file(file_path, off_start, off_end);
ret = split_huge_pages_in_file(file_path, off_start, off_end, new_order);
if (!ret)
ret = input_len;
goto out;
}
ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d", &pid, &vaddr_start, &vaddr_end, &new_order);
if (ret == 1 && pid == 1) {
split_huge_pages_all();
ret = strlen(input_buf);
goto out;
} else if (ret != 3) {
} else if (ret != 3 && ret != 4) {
ret = -EINVAL;
goto out;
}
ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order);
if (!ret)
ret = strlen(input_buf);
out:
......
......@@ -399,7 +399,27 @@ CATEGORY="thp" run_test ./khugepaged -s 2
CATEGORY="thp" run_test ./transhuge-stress -d 20
CATEGORY="thp" run_test ./split_huge_page_test
# Try to create XFS if not provided
if [ -z "${SPLIT_HUGE_PAGE_TEST_XFS_PATH}" ]; then
if test_selected "thp"; then
if grep xfs /proc/filesystems &>/dev/null; then
XFS_IMG=$(mktemp /tmp/xfs_img_XXXXXX)
SPLIT_HUGE_PAGE_TEST_XFS_PATH=$(mktemp -d /tmp/xfs_dir_XXXXXX)
truncate -s 314572800 ${XFS_IMG}
mkfs.xfs -q ${XFS_IMG}
mount -o loop ${XFS_IMG} ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
MOUNTED_XFS=1
fi
fi
fi
CATEGORY="thp" run_test ./split_huge_page_test ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
if [ -n "${MOUNTED_XFS}" ]; then
umount ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
rmdir ${SPLIT_HUGE_PAGE_TEST_XFS_PATH}
rm -f ${XFS_IMG}
fi
CATEGORY="migration" run_test ./migration
......
......@@ -16,6 +16,7 @@
#include <sys/mount.h>
#include <malloc.h>
#include <stdbool.h>
#include <time.h>
#include "vm_util.h"
#include "../kselftest.h"
......@@ -24,10 +25,11 @@ unsigned int pageshift;
uint64_t pmd_pagesize;
#define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages"
#define SMAP_PATH "/proc/self/smaps"
#define INPUT_MAX 80
#define PID_FMT "%d,0x%lx,0x%lx"
#define PATH_FMT "%s,0x%lx,0x%lx"
#define PID_FMT "%d,0x%lx,0x%lx,%d"
#define PATH_FMT "%s,0x%lx,0x%lx,%d"
#define PFN_MASK ((1UL<<55)-1)
#define KPF_THP (1UL<<22)
......@@ -102,7 +104,7 @@ void split_pmd_thp(void)
/* split all THPs */
write_debugfs(PID_FMT, getpid(), (uint64_t)one_page,
(uint64_t)one_page + len);
(uint64_t)one_page + len, 0);
for (i = 0; i < len; i++)
if (one_page[i] != (char)i)
......@@ -177,7 +179,7 @@ void split_pte_mapped_thp(void)
/* split all remapped THPs */
write_debugfs(PID_FMT, getpid(), (uint64_t)pte_mapped,
(uint64_t)pte_mapped + pagesize * 4);
(uint64_t)pte_mapped + pagesize * 4, 0);
/* smap does not show THPs after mremap, use kpageflags instead */
thp_size = 0;
......@@ -237,7 +239,7 @@ void split_file_backed_thp(void)
}
/* split the file-backed THP */
write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end);
write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end, 0);
status = unlink(testfile);
if (status) {
......@@ -265,8 +267,149 @@ void split_file_backed_thp(void)
ksft_exit_fail_msg("Error occurred\n");
}
bool prepare_thp_fs(const char *xfs_path, char *thp_fs_template,
const char **thp_fs_loc)
{
if (xfs_path) {
*thp_fs_loc = xfs_path;
return false;
}
*thp_fs_loc = mkdtemp(thp_fs_template);
if (!*thp_fs_loc)
ksft_exit_fail_msg("cannot create temp folder\n");
return true;
}
void cleanup_thp_fs(const char *thp_fs_loc, bool created_tmp)
{
int status;
if (!created_tmp)
return;
status = rmdir(thp_fs_loc);
if (status)
ksft_exit_fail_msg("cannot remove tmp dir: %s\n",
strerror(errno));
}
int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
char **addr)
{
size_t i;
int dummy;
srand(time(NULL));
*fd = open(testfile, O_CREAT | O_RDWR, 0664);
if (*fd == -1)
ksft_exit_fail_msg("Failed to create a file at %s\n", testfile);
for (i = 0; i < fd_size; i++) {
unsigned char byte = (unsigned char)i;
write(*fd, &byte, sizeof(byte));
}
close(*fd);
sync();
*fd = open("/proc/sys/vm/drop_caches", O_WRONLY);
if (*fd == -1) {
ksft_perror("open drop_caches");
goto err_out_unlink;
}
if (write(*fd, "3", 1) != 1) {
ksft_perror("write to drop_caches");
goto err_out_unlink;
}
close(*fd);
*fd = open(testfile, O_RDWR);
if (*fd == -1) {
ksft_perror("Failed to open testfile\n");
goto err_out_unlink;
}
*addr = mmap(NULL, fd_size, PROT_READ|PROT_WRITE, MAP_SHARED, *fd, 0);
if (*addr == (char *)-1) {
ksft_perror("cannot mmap");
goto err_out_close;
}
madvise(*addr, fd_size, MADV_HUGEPAGE);
for (size_t i = 0; i < fd_size; i++)
dummy += *(*addr + i);
if (!check_huge_file(*addr, fd_size / pmd_pagesize, pmd_pagesize)) {
ksft_print_msg("No large pagecache folio generated, please provide a filesystem supporting large folio\n");
munmap(*addr, fd_size);
close(*fd);
unlink(testfile);
ksft_test_result_skip("Pagecache folio split skipped\n");
return -2;
}
return 0;
err_out_close:
close(*fd);
err_out_unlink:
unlink(testfile);
ksft_exit_fail_msg("Failed to create large pagecache folios\n");
return -1;
}
void split_thp_in_pagecache_to_order(size_t fd_size, int order, const char *fs_loc)
{
int fd;
char *addr;
size_t i;
char testfile[INPUT_MAX];
int err = 0;
err = snprintf(testfile, INPUT_MAX, "%s/test", fs_loc);
if (err < 0)
ksft_exit_fail_msg("cannot generate right test file name\n");
err = create_pagecache_thp_and_fd(testfile, fd_size, &fd, &addr);
if (err)
return;
err = 0;
write_debugfs(PID_FMT, getpid(), (uint64_t)addr, (uint64_t)addr + fd_size, order);
for (i = 0; i < fd_size; i++)
if (*(addr + i) != (char)i) {
ksft_print_msg("%lu byte corrupted in the file\n", i);
err = EXIT_FAILURE;
goto out;
}
if (!check_huge_file(addr, 0, pmd_pagesize)) {
ksft_print_msg("Still FilePmdMapped not split\n");
err = EXIT_FAILURE;
goto out;
}
out:
munmap(addr, fd_size);
close(fd);
unlink(testfile);
if (err)
ksft_exit_fail_msg("Split PMD-mapped pagecache folio to order %d failed\n", order);
ksft_test_result_pass("Split PMD-mapped pagecache folio to order %d passed\n", order);
}
int main(int argc, char **argv)
{
int i;
size_t fd_size;
char *optional_xfs_path = NULL;
char fs_loc_template[] = "/tmp/thp_fs_XXXXXX";
const char *fs_loc;
bool created_tmp;
ksft_print_header();
if (geteuid() != 0) {
......@@ -274,7 +417,10 @@ int main(int argc, char **argv)
ksft_finished();
}
ksft_set_plan(3);
if (argc > 1)
optional_xfs_path = argv[1];
ksft_set_plan(3+9);
pagesize = getpagesize();
pageshift = ffs(pagesize) - 1;
......@@ -282,9 +428,19 @@ int main(int argc, char **argv)
if (!pmd_pagesize)
ksft_exit_fail_msg("Reading PMD pagesize failed\n");
fd_size = 2 * pmd_pagesize;
split_pmd_thp();
split_pte_mapped_thp();
split_file_backed_thp();
created_tmp = prepare_thp_fs(optional_xfs_path, fs_loc_template,
&fs_loc);
for (i = 8; i >= 0; i--)
split_thp_in_pagecache_to_order(fd_size, i, fs_loc);
cleanup_thp_fs(fs_loc, created_tmp);
ksft_finished();
return 0;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment