Commit fbf7adfa authored by Andrew Morton, committed by Linus Torvalds

[PATCH] rmap 3 arches + mapping_mapped

From: Hugh Dickins <hugh@veritas.com>

Some arches refer to page->mapping for their dcache flushing: use
page_mapping(page) for safety, to avoid confusion on anon pages, which will
store a different pointer there - though in most cases flush_dcache_page is
being applied to pagecache pages.
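
For context, page_mapping() itself comes from the earlier patches in this rmap series, not from this one. A minimal sketch of the intended semantics, assuming the PageAnon() test those patches provide:

/* Sketch only, not part of this patch: anon pages will reuse
 * page->mapping for another pointer, so callers wanting the
 * struct address_space must see NULL for them. */
static inline struct address_space *page_mapping(struct page *page)
{
	return PageAnon(page) ? NULL : page->mapping;
}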

arm has a useful mapping_mapped macro: move that to generic, and add
mapping_writably_mapped, to avoid explicit list_empty checks on i_mmap and
i_mmap_shared in several places.
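
The conversion in the arch flush paths below follows one pattern throughout; shown here in isolation (illustrative only, lifted from the arm flush_dcache_page hunks in this diff):

/* Before: open-coded test that no vma currently maps the file. */
if (page->mapping &&
    list_empty(&page->mapping->i_mmap) &&
    list_empty(&page->mapping->i_mmap_shared))
	set_bit(PG_dcache_dirty, &page->flags);

/* After: the same test via the new generic helpers. */
if (page_mapping(page) && !mapping_mapped(page->mapping))
	set_bit(PG_dcache_dirty, &page->flags);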

Very tempted to add page_mapped(page) tests, perhaps along with the
mapping_writably_mapped tests in do_generic_mapping_read and
do_shmem_file_read, to cut down on wasted flush_dcache effort; but the
serialization is not obvious, too unsafe to do in a hurry.
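
For illustration only, the tightening described above (and deliberately not applied here) might look roughly like this in do_generic_mapping_read, assuming page_mapped() from the rest of the rmap series:

/* NOT part of this patch: hedged sketch of the rejected extra test. */
if (page_mapped(page) && mapping_writably_mapped(mapping))
	flush_dcache_page(page);
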
parent da47ca23
@@ -191,7 +191,7 @@ void __flush_dcache_page(struct page *page)
__cpuc_flush_dcache_page(page_address(page));
- if (!page->mapping)
+ if (!page_mapping(page))
return;
/*
@@ -292,7 +292,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
- if (page->mapping) {
+ if (page_mapping(page)) {
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
if (dirty)
......
@@ -57,16 +57,13 @@ void flush_dcache_page(struct page *page)
{
unsigned long addr;
- if (page->mapping &&
- list_empty(&page->mapping->i_mmap) &&
- list_empty(&page->mapping->i_mmap_shared)) {
+ if (page_mapping(page) && !mapping_mapped(page->mapping)) {
SetPageDcacheDirty(page);
return;
}
/*
- * We could delay the flush for the !page->mapping case too. But that
+ * We could delay the flush for the !page_mapping case too. But that
* case is for exec env/arg pages and those are %99 certainly going to
* get faulted into the tlb (and thus flushed) anyways.
*/
@@ -81,7 +78,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn, addr;
pfn = pte_pfn(pte);
- if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page->mapping) &&
+ if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page_mapping(page)) &&
Page_dcache_dirty(page)) {
if (pages_do_alias((unsigned long)page_address(page),
address & PAGE_MASK)) {
......
@@ -68,7 +68,7 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
struct page *page = pte_page(pte);
- if (VALID_PAGE(page) && page->mapping &&
+ if (VALID_PAGE(page) && page_mapping(page) &&
test_bit(PG_dcache_dirty, &page->flags)) {
flush_kernel_dcache_page(page_address(page));
@@ -234,7 +234,7 @@ void __flush_dcache_page(struct page *page)
flush_kernel_dcache_page(page_address(page));
- if (!page->mapping)
+ if (!page_mapping(page))
return;
/* check shared list first if it's not empty...it's usually
* the shortest */
......
@@ -671,9 +671,9 @@ static __inline__ void __local_flush_dcache_page(struct page *page)
#if (L1DCACHE_SIZE > PAGE_SIZE)
__flush_dcache_page(page->virtual,
((tlb_type == spitfire) &&
- page->mapping != NULL));
+ page_mapping(page) != NULL));
#else
- if (page->mapping != NULL &&
+ if (page_mapping(page) != NULL &&
tlb_type == spitfire)
__flush_icache_page(__pa(page->virtual));
#endif
@@ -694,7 +694,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
if (tlb_type == spitfire) {
data0 =
((u64)&xcall_flush_dcache_page_spitfire);
- if (page->mapping != NULL)
+ if (page_mapping(page) != NULL)
data0 |= ((u64)1 << 32);
spitfire_xcall_deliver(data0,
__pa(page->virtual),
@@ -727,7 +727,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
goto flush_self;
if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
- if (page->mapping != NULL)
+ if (page_mapping(page) != NULL)
data0 |= ((u64)1 << 32);
spitfire_xcall_deliver(data0,
__pa(page->virtual),
......
@@ -139,9 +139,9 @@ __inline__ void flush_dcache_page_impl(struct page *page)
#if (L1DCACHE_SIZE > PAGE_SIZE)
__flush_dcache_page(page->virtual,
((tlb_type == spitfire) &&
- page->mapping != NULL));
+ page_mapping(page) != NULL));
#else
- if (page->mapping != NULL &&
+ if (page_mapping(page) != NULL &&
tlb_type == spitfire)
__flush_icache_page(__pa(page->virtual));
#endif
@@ -203,7 +203,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
pfn = pte_pfn(pte);
if (pfn_valid(pfn) &&
- (page = pfn_to_page(pfn), page->mapping) &&
+ (page = pfn_to_page(pfn), page_mapping(page)) &&
((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
@@ -227,9 +227,7 @@ void flush_dcache_page(struct page *page)
int dirty = test_bit(PG_dcache_dirty, &page->flags);
int dirty_cpu = dcache_dirty_cpu(page);
- if (page->mapping &&
- list_empty(&page->mapping->i_mmap) &&
- list_empty(&page->mapping->i_mmap_shared)) {
+ if (page_mapping(page) && !mapping_mapped(page->mapping)) {
if (dirty) {
if (dirty_cpu == smp_processor_id())
return;
@@ -237,7 +235,7 @@ void flush_dcache_page(struct page *page)
}
set_dcache_dirty(page);
} else {
- /* We could delay the flush for the !page->mapping
+ /* We could delay the flush for the !page_mapping
* case too. But that case is for exec env/arg
* pages and those are %99 certainly going to get
* faulted into the tlb (and thus flushed) anyways.
@@ -279,7 +277,7 @@ static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsig
if (!pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
- if (PageReserved(page) || !page->mapping)
+ if (PageReserved(page) || !page_mapping(page))
continue;
pgaddr = (unsigned long) page_address(page);
uaddr = address + offset;
......
@@ -1453,13 +1453,10 @@ int fcntl_setlk(struct file *filp, unsigned int cmd, struct flock __user *l)
* and shared.
*/
if (IS_MANDLOCK(inode) &&
- (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
- struct address_space *mapping = filp->f_mapping;
- if (!list_empty(&mapping->i_mmap_shared)) {
- error = -EAGAIN;
- goto out;
- }
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
+ mapping_writably_mapped(filp->f_mapping)) {
+ error = -EAGAIN;
+ goto out;
}
error = flock_to_posix_lock(filp, file_lock, &flock);
@@ -1591,13 +1588,10 @@ int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
* and shared.
*/
if (IS_MANDLOCK(inode) &&
- (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
- struct address_space *mapping = filp->f_mapping;
- if (!list_empty(&mapping->i_mmap_shared)) {
- error = -EAGAIN;
- goto out;
- }
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
+ mapping_writably_mapped(filp->f_mapping)) {
+ error = -EAGAIN;
+ goto out;
}
error = flock64_to_posix_lock(filp, file_lock, &flock);
......
@@ -596,9 +596,7 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
/*
* Some useful predicates.
*/
- #define VN_MAPPED(vp) \
- (!list_empty(&(LINVFS_GET_IP(vp)->i_mapping->i_mmap)) || \
- (!list_empty(&(LINVFS_GET_IP(vp)->i_mapping->i_mmap_shared))))
+ #define VN_MAPPED(vp) mapping_mapped(LINVFS_GET_IP(vp)->i_mapping)
#define VN_CACHED(vp) (LINVFS_GET_IP(vp)->i_mapping->nrpages)
#define VN_DIRTY(vp) mapping_tagged(LINVFS_GET_IP(vp)->i_mapping, \
PAGECACHE_TAG_DIRTY)
......
@@ -283,23 +283,19 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
* flush_dcache_page is used when the kernel has written to the page
* cache page at virtual address page->virtual.
*
- * If this page isn't mapped (ie, page->mapping = NULL), or it has
- * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
- * then we _must_ always clean + invalidate the dcache entries associated
- * with the kernel mapping.
+ * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * have userspace mappings, then we _must_ always clean + invalidate
+ * the dcache entries associated with the kernel mapping.
*
* Otherwise we can defer the operation, and clean the cache when we are
* about to change to user space. This is the same method as used on SPARC64.
* See update_mmu_cache for the user space part.
*/
- #define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \
- !list_empty(&(map)->i_mmap_shared))
extern void __flush_dcache_page(struct page *);
static inline void flush_dcache_page(struct page *page)
{
- if (page->mapping && !mapping_mapped(page->mapping))
+ if (page_mapping(page) && !mapping_mapped(page->mapping))
set_bit(PG_dcache_dirty, &page->flags);
else
__flush_dcache_page(page);
......
@@ -69,8 +69,7 @@ extern void __flush_dcache_page(struct page *page);
static inline void flush_dcache_page(struct page *page)
{
- if (page->mapping && list_empty(&page->mapping->i_mmap) &&
- list_empty(&page->mapping->i_mmap_shared)) {
+ if (page_mapping(page) && !mapping_mapped(page->mapping)) {
set_bit(PG_dcache_dirty, &page->flags);
} else {
__flush_dcache_page(page);
......
@@ -101,8 +101,8 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep)
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
- if (!page->mapping
- || list_empty(&page->mapping->i_mmap_shared))
+ if (!page_mapping(page) ||
+ !mapping_writably_mapped(page->mapping))
__clear_bit(PG_mapped, &page->flags);
}
}
......
@@ -373,6 +373,26 @@ struct block_device {
int mapping_tagged(struct address_space *mapping, int tag);
+ /*
+ * Might pages of this file be mapped into userspace?
+ */
+ static inline int mapping_mapped(struct address_space *mapping)
+ {
+ return !list_empty(&mapping->i_mmap) ||
+ !list_empty(&mapping->i_mmap_shared);
+ }
+ /*
+ * Might pages of this file have been modified in userspace?
+ * Note that i_mmap_shared holds all the VM_SHARED vmas: do_mmap_pgoff
+ * marks vma as VM_SHARED if it is shared, and the file was opened for
+ * writing i.e. vma may be mprotected writable even if now readonly.
+ */
+ static inline int mapping_writably_mapped(struct address_space *mapping)
+ {
+ return !list_empty(&mapping->i_mmap_shared);
+ }
/*
* Use sequence counter to get consistent i_size on 32-bit processors.
*/
......
@@ -660,7 +660,7 @@ void do_generic_mapping_read(struct address_space *mapping,
* virtual addresses, take care about potential aliasing
* before reading the page on the kernel side.
*/
- if (!list_empty(&mapping->i_mmap_shared))
+ if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
/*
......
@@ -1340,7 +1340,7 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
* virtual addresses, take care about potential aliasing
* before reading the page on the kernel side.
*/
- if (!list_empty(&mapping->i_mmap_shared))
+ if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
/*
* Mark the page accessed if we read the beginning.
......
@@ -190,13 +190,8 @@ static inline int page_mapping_inuse(struct page *page)
if (!mapping)
return 0;
- /* File is mmap'd by somebody. */
- if (!list_empty(&mapping->i_mmap))
- return 1;
- if (!list_empty(&mapping->i_mmap_shared))
- return 1;
- return 0;
+ /* File is mmap'd by somebody? */
+ return mapping_mapped(mapping);
}
static inline int is_page_cache_freeable(struct page *page)
......