Commit 0af4e98b authored by Andrea Arcangeli, committed by Linus Torvalds

thp: madvise(MADV_HUGEPAGE)

Add madvise(MADV_HUGEPAGE) to mark regions that are important to be
backed by hugepages.  Return -EINVAL if the vma is not anonymous, or if
the feature isn't built into the kernel.  Never silently return success.
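As an illustration (not part of this commit), a userspace caller might
exercise the new hint roughly as below; this sketch assumes a kernel
built with CONFIG_TRANSPARENT_HUGEPAGE and libc headers that already
define MADV_HUGEPAGE:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	#define LEN (4UL << 20)	/* 4 MiB: room for PMD-sized hugepages */

	int main(void)
	{
		/* MADV_HUGEPAGE is only accepted on anonymous vmas. */
		void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;

		/*
		 * Request hugepage backing.  Per this commit, the call
		 * fails (errno == EINVAL) rather than silently succeeding
		 * when the vma is not anonymous or THP is not compiled in.
		 */
		if (madvise(p, LEN, MADV_HUGEPAGE))
			fprintf(stderr, "madvise(MADV_HUGEPAGE): %s\n",
				strerror(errno));

		munmap(p, LEN);
		return 0;
	}

Note that madvise() itself returns -1 with errno set to EINVAL; the
-EINVAL above is the in-kernel return value.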
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f66055ab
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -97,6 +97,7 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
 #if HPAGE_PMD_ORDER > MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
+extern int hugepage_madvise(unsigned long *vm_flags);
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUG(); 0; })
@@ -113,6 +114,11 @@ static inline int split_huge_page(struct page *page)
 	do { } while (0)
 #define wait_split_huge_page(__anon_vma, __pmd)	\
 	do { } while (0)
+static inline int hugepage_madvise(unsigned long *vm_flags)
+{
+	BUG();
+	return 0;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_HUGE_MM_H */
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -896,6 +896,22 @@ int split_huge_page(struct page *page)
 	return ret;
 }
 
+int hugepage_madvise(unsigned long *vm_flags)
+{
+	/*
+	 * Be somewhat over-protective like KSM for now!
+	 */
+	if (*vm_flags & (VM_HUGEPAGE | VM_SHARED | VM_MAYSHARE |
+			 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
+			 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
+			 VM_MIXEDMAP | VM_SAO))
+		return -EINVAL;
+	*vm_flags |= VM_HUGEPAGE;
+
+	return 0;
+}
+
 void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
 {
 	struct page *page;
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -71,6 +71,11 @@ static long madvise_behavior(struct vm_area_struct * vma,
 		if (error)
 			goto out;
 		break;
+	case MADV_HUGEPAGE:
+		error = hugepage_madvise(&new_flags);
+		if (error)
+			goto out;
+		break;
 	}
 
 	if (new_flags == vma->vm_flags) {
@@ -282,6 +287,9 @@ madvise_behavior_valid(int behavior)
 #ifdef CONFIG_KSM
 	case MADV_MERGEABLE:
 	case MADV_UNMERGEABLE:
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	case MADV_HUGEPAGE:
 #endif
 		return 1;