Commit c49fcfcd authored by Mike Kravetz's avatar Mike Kravetz Committed by Linus Torvalds

mm/shmem: update file sealing comments and file checking

In preparation for memfd code restructure, update comments, definitions
and function names dealing with file sealing to indicate that tmpfs and
hugetlbfs are the supported filesystems.  Also, change file pointer
checks in memfd_file_seals_ptr to use defined interfaces instead of
directly referencing file_operation structs.

Link: http://lkml.kernel.org/r/20180415182119.4517-3-mike.kravetz@oracle.com
Signed-off-by: default avatarMike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: default avatarKhalid Aziz <khalid.aziz@oracle.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Herrmann <dh.herrmann@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Marc-André Lureau <marcandre.lureau@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 5b9c98f3
...@@ -2620,12 +2620,13 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) ...@@ -2620,12 +2620,13 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
/* /*
* We need a tag: a new tag would expand every radix_tree_node by 8 bytes, * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
* so reuse a tag which we firmly believe is never set or cleared on shmem. * so reuse a tag which we firmly believe is never set or cleared on tmpfs
* or hugetlbfs because they are memory only filesystems.
*/ */
#define SHMEM_TAG_PINNED PAGECACHE_TAG_TOWRITE #define MEMFD_TAG_PINNED PAGECACHE_TAG_TOWRITE
#define LAST_SCAN 4 /* about 150ms max */ #define LAST_SCAN 4 /* about 150ms max */
static void shmem_tag_pins(struct address_space *mapping) static void memfd_tag_pins(struct address_space *mapping)
{ {
struct radix_tree_iter iter; struct radix_tree_iter iter;
void __rcu **slot; void __rcu **slot;
...@@ -2646,7 +2647,7 @@ static void shmem_tag_pins(struct address_space *mapping) ...@@ -2646,7 +2647,7 @@ static void shmem_tag_pins(struct address_space *mapping)
} else if (page_count(page) - page_mapcount(page) > 1) { } else if (page_count(page) - page_mapcount(page) > 1) {
xa_lock_irq(&mapping->i_pages); xa_lock_irq(&mapping->i_pages);
radix_tree_tag_set(&mapping->i_pages, iter.index, radix_tree_tag_set(&mapping->i_pages, iter.index,
SHMEM_TAG_PINNED); MEMFD_TAG_PINNED);
xa_unlock_irq(&mapping->i_pages); xa_unlock_irq(&mapping->i_pages);
} }
...@@ -2667,7 +2668,7 @@ static void shmem_tag_pins(struct address_space *mapping) ...@@ -2667,7 +2668,7 @@ static void shmem_tag_pins(struct address_space *mapping)
* The caller must guarantee that no new user will acquire writable references * The caller must guarantee that no new user will acquire writable references
* to those pages to avoid races. * to those pages to avoid races.
*/ */
static int shmem_wait_for_pins(struct address_space *mapping) static int memfd_wait_for_pins(struct address_space *mapping)
{ {
struct radix_tree_iter iter; struct radix_tree_iter iter;
void __rcu **slot; void __rcu **slot;
...@@ -2675,11 +2676,11 @@ static int shmem_wait_for_pins(struct address_space *mapping) ...@@ -2675,11 +2676,11 @@ static int shmem_wait_for_pins(struct address_space *mapping)
struct page *page; struct page *page;
int error, scan; int error, scan;
shmem_tag_pins(mapping); memfd_tag_pins(mapping);
error = 0; error = 0;
for (scan = 0; scan <= LAST_SCAN; scan++) { for (scan = 0; scan <= LAST_SCAN; scan++) {
if (!radix_tree_tagged(&mapping->i_pages, SHMEM_TAG_PINNED)) if (!radix_tree_tagged(&mapping->i_pages, MEMFD_TAG_PINNED))
break; break;
if (!scan) if (!scan)
...@@ -2690,7 +2691,7 @@ static int shmem_wait_for_pins(struct address_space *mapping) ...@@ -2690,7 +2691,7 @@ static int shmem_wait_for_pins(struct address_space *mapping)
start = 0; start = 0;
rcu_read_lock(); rcu_read_lock();
radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter,
start, SHMEM_TAG_PINNED) { start, MEMFD_TAG_PINNED) {
page = radix_tree_deref_slot(slot); page = radix_tree_deref_slot(slot);
if (radix_tree_exception(page)) { if (radix_tree_exception(page)) {
...@@ -2717,7 +2718,7 @@ static int shmem_wait_for_pins(struct address_space *mapping) ...@@ -2717,7 +2718,7 @@ static int shmem_wait_for_pins(struct address_space *mapping)
xa_lock_irq(&mapping->i_pages); xa_lock_irq(&mapping->i_pages);
radix_tree_tag_clear(&mapping->i_pages, radix_tree_tag_clear(&mapping->i_pages,
iter.index, SHMEM_TAG_PINNED); iter.index, MEMFD_TAG_PINNED);
xa_unlock_irq(&mapping->i_pages); xa_unlock_irq(&mapping->i_pages);
continue_resched: continue_resched:
if (need_resched()) { if (need_resched()) {
...@@ -2733,11 +2734,11 @@ static int shmem_wait_for_pins(struct address_space *mapping) ...@@ -2733,11 +2734,11 @@ static int shmem_wait_for_pins(struct address_space *mapping)
static unsigned int *memfd_file_seals_ptr(struct file *file) static unsigned int *memfd_file_seals_ptr(struct file *file)
{ {
if (file->f_op == &shmem_file_operations) if (shmem_file(file))
return &SHMEM_I(file_inode(file))->seals; return &SHMEM_I(file_inode(file))->seals;
#ifdef CONFIG_HUGETLBFS #ifdef CONFIG_HUGETLBFS
if (file->f_op == &hugetlbfs_file_operations) if (is_file_hugepages(file))
return &HUGETLBFS_I(file_inode(file))->seals; return &HUGETLBFS_I(file_inode(file))->seals;
#endif #endif
...@@ -2757,16 +2758,17 @@ static int memfd_add_seals(struct file *file, unsigned int seals) ...@@ -2757,16 +2758,17 @@ static int memfd_add_seals(struct file *file, unsigned int seals)
/* /*
* SEALING * SEALING
* Sealing allows multiple parties to share a shmem-file but restrict * Sealing allows multiple parties to share a tmpfs or hugetlbfs file
* access to a specific subset of file operations. Seals can only be * but restrict access to a specific subset of file operations. Seals
* added, but never removed. This way, mutually untrusted parties can * can only be added, but never removed. This way, mutually untrusted
* share common memory regions with a well-defined policy. A malicious * parties can share common memory regions with a well-defined policy.
* peer can thus never perform unwanted operations on a shared object. * A malicious peer can thus never perform unwanted operations on a
* shared object.
* *
* Seals are only supported on special shmem-files and always affect * Seals are only supported on special tmpfs or hugetlbfs files and
* the whole underlying inode. Once a seal is set, it may prevent some * always affect the whole underlying inode. Once a seal is set, it
* kinds of access to the file. Currently, the following seals are * may prevent some kinds of access to the file. Currently, the
* defined: * following seals are defined:
* SEAL_SEAL: Prevent further seals from being set on this file * SEAL_SEAL: Prevent further seals from being set on this file
* SEAL_SHRINK: Prevent the file from shrinking * SEAL_SHRINK: Prevent the file from shrinking
* SEAL_GROW: Prevent the file from growing * SEAL_GROW: Prevent the file from growing
...@@ -2780,9 +2782,9 @@ static int memfd_add_seals(struct file *file, unsigned int seals) ...@@ -2780,9 +2782,9 @@ static int memfd_add_seals(struct file *file, unsigned int seals)
* added. * added.
* *
* Semantics of sealing are only defined on volatile files. Only * Semantics of sealing are only defined on volatile files. Only
* anonymous shmem files support sealing. More importantly, seals are * anonymous tmpfs and hugetlbfs files support sealing. More
* never written to disk. Therefore, there's no plan to support it on * importantly, seals are never written to disk. Therefore, there's
* other file types. * no plan to support it on other file types.
*/ */
if (!(file->f_mode & FMODE_WRITE)) if (!(file->f_mode & FMODE_WRITE))
...@@ -2808,7 +2810,7 @@ static int memfd_add_seals(struct file *file, unsigned int seals) ...@@ -2808,7 +2810,7 @@ static int memfd_add_seals(struct file *file, unsigned int seals)
if (error) if (error)
goto unlock; goto unlock;
error = shmem_wait_for_pins(file->f_mapping); error = memfd_wait_for_pins(file->f_mapping);
if (error) { if (error) {
mapping_allow_writable(file->f_mapping); mapping_allow_writable(file->f_mapping);
goto unlock; goto unlock;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment