Commit 6c210482 authored by Martin Schwidefsky

[S390] split page_test_and_clear_dirty.

The page_test_and_clear_dirty primitive really consists of two
operations, page_test_dirty and page_clear_dirty. The combination of
the two is not an atomic operation anyway, so it makes more sense to
have two separate operations instead of one.
Besides improving the readability of the s390 version of
SetPageUptodate, the split lets SetPageUptodate avoid the
page_test_dirty operation, which is an expensive
insert-storage-key-extended (iske) instruction.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 2fc2d1e9
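
For context, here is a minimal stand-alone sketch of the new calling convention. It is illustrative only and not part of the patch: the *_model functions, the KEY_CHANGED constant and the storage_key variable are stand-ins for the s390 storage key and its change bit (_PAGE_CHANGED); the real primitives are the page_test_dirty()/page_clear_dirty() pair introduced below.

#include <stdio.h>

#define KEY_CHANGED 0x02		/* stand-in for _PAGE_CHANGED */

static unsigned int storage_key;	/* fake storage key of one page */

/* Model of page_test_dirty(): read the key and test the change bit (iske). */
static int page_test_dirty_model(void)
{
	return (storage_key & KEY_CHANGED) != 0;
}

/* Model of page_clear_dirty(): reset the change bit (sske in the real code). */
static void page_clear_dirty_model(void)
{
	storage_key &= ~KEY_CHANGED;
}

int main(void)
{
	storage_key = KEY_CHANGED;	/* pretend the hardware dirtied the page */

	/* The old combined primitive becomes "test, then clear": */
	if (page_test_dirty_model()) {
		page_clear_dirty_model();
		printf("dirty bit transferred to struct page\n");
	}

	/* A clear-only path (as in SetPageUptodate) never needs the test. */
	page_clear_dirty_model();
	return 0;
}
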
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -139,8 +139,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address
 #define pte_same(A,B) (pte_val(A) == pte_val(B))
 #endif
 
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-#define page_test_and_clear_dirty(page) (0)
+#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
+#define page_test_dirty(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
+#define page_clear_dirty(page) do { } while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
 #define pte_maybe_dirty(pte) pte_dirty(pte)
 #else
 #define pte_maybe_dirty(pte) (1)
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -753,14 +753,14 @@ ptep_establish(struct vm_area_struct *vma,
  * should therefore only be called if it is not mapped in any
  * address space.
  */
-static inline int page_test_and_clear_dirty(struct page *page)
+static inline int page_test_dirty(struct page *page)
 {
-	unsigned long physpage = page_to_phys(page);
-	int skey = page_get_storage_key(physpage);
-
-	if (skey & _PAGE_CHANGED)
-		page_set_storage_key(physpage, skey & ~_PAGE_CHANGED);
-	return skey & _PAGE_CHANGED;
+	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
+}
+
+static inline void page_clear_dirty(struct page *page)
+{
+	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
 }
 
 /*
@@ -953,7 +953,8 @@ extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PAGE_TEST_DIRTY
+#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
 #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
 
 #include <asm-generic/pgtable.h>
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -133,7 +133,7 @@
 static inline void SetPageUptodate(struct page *page)
 {
 	if (!test_and_set_bit(PG_uptodate, &page->flags))
-		page_test_and_clear_dirty(page);
+		page_clear_dirty(page);
 }
 #else
 #define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -498,9 +498,11 @@ int page_mkclean(struct page *page)
 		struct address_space *mapping = page_mapping(page);
 		if (mapping)
 			ret = page_mkclean_file(mapping, page);
-		if (page_test_and_clear_dirty(page))
+		if (page_test_dirty(page)) {
+			page_clear_dirty(page);
 			ret = 1;
+		}
 	}
 
 	return ret;
 }
@@ -605,8 +607,10 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-	if (page_test_and_clear_dirty(page))
+	if (page_test_dirty(page)) {
+		page_clear_dirty(page);
 		set_page_dirty(page);
+	}
 	__dec_zone_page_state(page,
 			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
 }