Commit 574c5b3d authored by Thomas Hellstrom

mm: Add a vmf_insert_mixed_prot() function

The TTM module today uses a hack to be able to set a different page
protection than struct vm_area_struct::vm_page_prot. To be able to do
this properly, add the needed vm functionality as vmf_insert_mixed_prot().

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: "Christian König" <christian.koenig@amd.com>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
parent 71e72740
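
For orientation, here is a minimal sketch of how a driver fault handler might use the new helper to insert a PFN with a caching mode that differs from vma->vm_page_prot. The my_drv_fault()/my_drv_lookup_pfn() names and the write-combined choice are illustrative assumptions, not part of this commit:

#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Hypothetical PFN lookup; a real driver would resolve this from its buffer object. */
extern unsigned long my_drv_lookup_pfn(struct vm_area_struct *vma,
				       unsigned long address);

static vm_fault_t my_drv_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	/* Per-fault pgprot: write-combined instead of vma->vm_page_prot. */
	pgprot_t prot = pgprot_writecombine(vma->vm_page_prot);
	unsigned long pfn = my_drv_lookup_pfn(vma, vmf->address);

	return vmf_insert_mixed_prot(vma, vmf->address,
				     __pfn_to_pfn_t(pfn, PFN_DEV), prot);
}
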
@@ -2533,6 +2533,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn);
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+			pfn_t pfn, pgprot_t pgprot);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
 			unsigned long addr, pfn_t pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
...
@@ -312,7 +312,12 @@ struct vm_area_struct {
 	/* Second cache line starts here. */
 	struct mm_struct *vm_mm;	/* The address space we belong to. */
-	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
+
+	/*
+	 * Access permissions of this VMA.
+	 * See vmf_insert_mixed_prot() for discussion.
+	 */
+	pgprot_t vm_page_prot;
 	unsigned long vm_flags;		/* Flags, see mm.h. */
 
 	/*
...
@@ -1664,6 +1664,9 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
  * impractical.
  *
+ * See vmf_insert_mixed_prot() for a discussion of the implication of using
+ * a value of @pgprot different from that of @vma->vm_page_prot.
+ *
  * Context: Process context. May allocate using %GFP_KERNEL.
  * Return: vm_fault_t value.
  */
@@ -1737,9 +1740,9 @@ static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
 }
 
 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
-			unsigned long addr, pfn_t pfn, bool mkwrite)
+			unsigned long addr, pfn_t pfn, pgprot_t pgprot,
+			bool mkwrite)
 {
-	pgprot_t pgprot = vma->vm_page_prot;
 	int err;
 
 	BUG_ON(!vm_mixed_ok(vma, pfn));
@@ -1782,10 +1785,42 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
 	return VM_FAULT_NOPAGE;
 }
 
+/**
+ * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vmf_insert_mixed(), except that it allows drivers
+ * to override pgprot on a per-page basis.
+ *
+ * Typically this function should be used by drivers to set caching- and
+ * encryption bits different than those of @vma->vm_page_prot, because
+ * the caching- or encryption mode may not be known at mmap() time.
+ * This is ok as long as @vma->vm_page_prot is not used by the core vm
+ * to set caching and encryption bits for those vmas (except for COW pages).
+ * This is ensured by core vm only modifying these page table entries using
+ * functions that don't touch caching- or encryption bits, using pte_modify()
+ * if needed. (See for example mprotect()).
+ * Also when new page-table entries are created, this is only done using the
+ * fault() callback, and never using the value of vma->vm_page_prot,
+ * except for page-table entries that point to anonymous pages as the result
+ * of COW.
+ *
+ * Context: Process context. May allocate using %GFP_KERNEL.
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+			pfn_t pfn, pgprot_t pgprot)
+{
+	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
+}
+
 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 		pfn_t pfn)
 {
-	return __vm_insert_mixed(vma, addr, pfn, false);
+	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
 }
 EXPORT_SYMBOL(vmf_insert_mixed);
@@ -1797,7 +1832,7 @@ EXPORT_SYMBOL(vmf_insert_mixed);
 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
 		unsigned long addr, pfn_t pfn)
 {
-	return __vm_insert_mixed(vma, addr, pfn, true);
+	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
 }
 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
...
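
For illustration (not part of the patch): because vmf_insert_mixed() and vmf_insert_mixed_mkwrite() now simply forward vma->vm_page_prot to __vm_insert_mixed(), the hypothetical wrapper below behaves exactly like a plain vmf_insert_mixed() call, so existing callers are unaffected.

#include <linux/mm.h>
#include <linux/pfn_t.h>

/*
 * Illustration only: with this commit, the wrapper below takes the same
 * path through __vm_insert_mixed() as vmf_insert_mixed(vma, addr, pfn)
 * and installs an identical page table entry.
 */
static vm_fault_t insert_with_default_prot(struct vm_area_struct *vma,
					   unsigned long addr, pfn_t pfn)
{
	return vmf_insert_mixed_prot(vma, addr, pfn, vma->vm_page_prot);
}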