Commit 4b4bb46d authored by Jan Kara, committed by Linus Torvalds

dax: clear dirty entry tags on cache flush

Currently we never clear dirty tags in DAX mappings and thus address
ranges to flush accumulate.  Now that we have locking of radix tree
entries, we have all the locking necessary to reliably clear the radix
tree dirty tag when flushing caches for corresponding address range.
Similarly to page_mkclean() we also have to write-protect pages to get a
page fault when the page is next written to so that we can mark the
entry dirty again.

Link: http://lkml.kernel.org/r/1479460644-25076-21-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2f89dc12
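
For orientation, here is a condensed sketch of the page_mkclean()-style write-protect step that the new dax_mapping_entry_mkclean() in the diff below performs for each VMA. The wrapper name dax_wrprotect_pte_sketch() and the assumption that the pte has already been looked up under its page table lock are illustrative only, not part of the patch.

/* Sketch only: write-protect and clean one pte mapping a DAX pfn, as
 * page_mkclean() does for page cache pages.  Assumes the caller holds
 * the page table lock returned by follow_pte(). */
static bool dax_wrprotect_pte_sketch(struct vm_area_struct *vma,
                                     unsigned long address,
                                     unsigned long pfn, pte_t *ptep)
{
        pte_t pte;

        /* Nothing to do if the pte no longer maps this pfn or is
         * already clean and read-only. */
        if (pfn != pte_pfn(*ptep))
                return false;
        if (!pte_dirty(*ptep) && !pte_write(*ptep))
                return false;

        /* Flush, clear the pte, then re-install it write-protected and
         * clean, so the next store faults and re-dirties the radix
         * tree entry. */
        flush_cache_page(vma, address, pfn);
        pte = ptep_clear_flush(vma, address, ptep);
        pte = pte_wrprotect(pte);
        pte = pte_mkclean(pte);
        set_pte_at(vma->vm_mm, address, ptep, pte);
        return true;
}
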
@@ -31,6 +31,7 @@
 #include <linux/vmstat.h>
 #include <linux/pfn_t.h>
 #include <linux/sizes.h>
+#include <linux/mmu_notifier.h>
 #include <linux/iomap.h>
 #include "internal.h"
 
@@ -614,6 +615,59 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 	return new_entry;
 }
 
+static inline unsigned long
+pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
+{
+	unsigned long address;
+
+	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
+	return address;
+}
+
+/* Walk all mappings of a given index of a file and writeprotect them */
+static void dax_mapping_entry_mkclean(struct address_space *mapping,
+				      pgoff_t index, unsigned long pfn)
+{
+	struct vm_area_struct *vma;
+	pte_t *ptep;
+	pte_t pte;
+	spinlock_t *ptl;
+	bool changed;
+
+	i_mmap_lock_read(mapping);
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
+		unsigned long address;
+
+		cond_resched();
+
+		if (!(vma->vm_flags & VM_SHARED))
+			continue;
+
+		address = pgoff_address(index, vma);
+		changed = false;
+		if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+			continue;
+		if (pfn != pte_pfn(*ptep))
+			goto unlock;
+		if (!pte_dirty(*ptep) && !pte_write(*ptep))
+			goto unlock;
+
+		flush_cache_page(vma, address, pfn);
+		pte = ptep_clear_flush(vma, address, ptep);
+		pte = pte_wrprotect(pte);
+		pte = pte_mkclean(pte);
+		set_pte_at(vma->vm_mm, address, ptep, pte);
+		changed = true;
+unlock:
+		pte_unmap_unlock(ptep, ptl);
+
+		if (changed)
+			mmu_notifier_invalidate_page(vma->vm_mm, address);
+	}
+	i_mmap_unlock_read(mapping);
+}
+
 static int dax_writeback_one(struct block_device *bdev,
 		struct address_space *mapping, pgoff_t index, void *entry)
 {
@@ -687,7 +741,17 @@ static int dax_writeback_one(struct block_device *bdev,
 		goto unmap;
 	}
 
+	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
 	wb_cache_pmem(dax.addr, dax.size);
+	/*
+	 * After we have flushed the cache, we can clear the dirty tag. There
+	 * cannot be new dirty data in the pfn after the flush has completed as
+	 * the pfn mappings are writeprotected and fault waits for mapping
+	 * entry lock.
+	 */
+	spin_lock_irq(&mapping->tree_lock);
+	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
+	spin_unlock_irq(&mapping->tree_lock);
  unmap:
 	dax_unmap_atomic(bdev, &dax);
 	put_locked_mapping_entry(mapping, index, entry);
...
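
The comment added in the last hunk explains why the dirty tag may only be cleared once the flush has completed. Below is a minimal sketch of that ordering as dax_writeback_one() now follows it; the wrapper name is illustrative and the surrounding block-device mapping details are omitted.

/* Sketch only: the write-protect -> flush -> clear-tag ordering that
 * dax_writeback_one() relies on after this patch. */
static void dax_flush_and_clean_sketch(struct address_space *mapping,
                                       pgoff_t index, unsigned long pfn,
                                       void *kaddr, size_t size)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;

        /* 1) Write-protect every userspace mapping of the pfn so any
         *    later store must fault and re-set the dirty tag. */
        dax_mapping_entry_mkclean(mapping, index, pfn);

        /* 2) Only then flush the CPU caches for the range. */
        wb_cache_pmem(kaddr, size);

        /* 3) Finally clear the radix tree dirty tag: new writers must
         *    first fault and wait for the mapping entry lock, so no
         *    dirty data can slip in behind the flush. */
        spin_lock_irq(&mapping->tree_lock);
        radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
        spin_unlock_irq(&mapping->tree_lock);
}
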