Commit 16450b81 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] sh: ptep_get_and_clear() compile fix.

From: Paul Mundt <lethal@Linux-SH.ORG>

This fixes up a compile error occurring with ptep_get_and_clear() existing in
pgalloc.h.  We move it to a somewhat more sensible location instead, and take
this opportunity to make some cleanups for use of generic code in the SH-3
case, etc.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 6cde659c
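The fix leans on the kernel's __HAVE_ARCH_* override convention: an architecture that defines __HAVE_ARCH_PTEP_GET_AND_CLEAR before pulling in <asm-generic/pgtable.h> supplies its own ptep_get_and_clear(), while an architecture that leaves it undefined (the SH-3 case after this patch) gets the generic helper instead. A rough sketch of what that fallback looks like, modelled on the SH-3 branch this patch removes from pgalloc.h rather than quoted from asm-generic/pgtable.h:

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	/* read the old entry, clear it in place, hand the old value back */
	pte_t pte = *ptep;
	pte_clear(ptep);
	return pte;
}
#endif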
arch/sh/mm/pg-sh4.c
/*
 * $Id: pg-sh4.c,v 1.1.2.2 2002/11/17 17:56:18 lethal Exp $
 *
 * arch/sh/mm/pg-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
@@ -101,3 +99,24 @@ void copy_user_page(void *to, void *from, unsigned long address,
		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
	}
}
/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(ptep);
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}
include/asm-sh/pgalloc.h
@@ -84,71 +84,10 @@ static inline void pte_free(struct page *pte)
#define pmd_free(x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#define check_pgt_cache() do { } while (0)
#if defined(CONFIG_CPU_SH4)
#ifdef CONFIG_CPU_SH4
#define PG_mapped PG_arch_1
/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(ptep);
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}
#else
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(ptep);
	return pte;
}
#endif
/*
 * Following functions are same as generic ones.
 */
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte(ptep, pte_mkold(pte));
	return 1;
}
static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte(ptep, pte_mkclean(pte));
	return 1;
}
static inline void ptep_set_wrprotect(pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte(ptep, pte_wrprotect(old_pte));
}
static inline void ptep_mkdirty(pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte(ptep, pte_mkdirty(old_pte));
}
#define check_pgt_cache() do { } while (0)
#endif /* __ASM_SH_PGALLOC_H */
include/asm-sh/pgtable.h
@@ -120,6 +120,8 @@ extern unsigned long empty_zero_page[1024];
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
#define PAGE_KERNEL_NOCACHE \
__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
#define PAGE_KERNEL_PCC(slot, type) \
__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_FLAGS_HARD | (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | (type))
@@ -129,6 +131,7 @@ extern unsigned long empty_zero_page[1024];
#define PAGE_COPY __pgprot(0)
#define PAGE_READONLY __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define PAGE_KERNEL_NOCACHE __pgprot(0)
#define PAGE_KERNEL_RO __pgprot(0)
#define PAGE_KERNEL_PCC __pgprot(0)
#endif
@@ -255,25 +258,17 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
#define __swp_type(x) ((x).val & 0xff)
#define __swp_offset(x) ((x).val >> 10)
#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 10) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 })
/*
* Encode and decode a nonlinear file mapping entry
*/
#define PTE_FILE_MAX_BITS 29
#define pte_to_pgoff(pte) (pte_val(pte))
#define pgoff_to_pte(off) ((pte_t) { (off) | _PAGE_FILE })
#define pte_to_pgoff(pte) (pte_val(pte) >> 1)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE })
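The shift by one in the file-PTE macros above keeps the encoded offset out of the PTE's low bit, so pgoff_to_pte() can OR in _PAGE_FILE to mark a nonlinear file mapping; the matching shift in __pte_to_swp_entry()/__swp_entry_to_pte() leaves that bit clear for swap entries, which is presumably what lets the two kinds of not-present PTE be told apart. A small round-trip sketch of the encoding, assuming _PAGE_FILE occupies the low bit freed by the shift (the macros are from the header above; the bit position is an assumption):

/* illustration only: encode a file offset, then decode it again */
static inline void pgoff_roundtrip_demo(void)
{
	unsigned long pgoff = 0x12345;
	pte_t pte = pgoff_to_pte(pgoff);	/* ((pgoff << 1) | _PAGE_FILE) */
	unsigned long back = pte_to_pgoff(pte);	/* >> 1 drops the flag bit */

	BUG_ON(back != pgoff);	/* holds if _PAGE_FILE is that freed low bit */
}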
/*
* Routines for update of PTE
*
* We just can use generic implementation, as SuperH has no SMP feature.
* (We needed atomic implementation for SMP)
*
*/
#define pte_same(A,B) (pte_val(A) == pte_val(B))
typedef pte_t *pte_addr_t;
#endif /* !__ASSEMBLY__ */
@@ -290,12 +285,11 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
extern unsigned int kobjsize(const void *objp);
#endif /* !CONFIG_MMU */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#ifdef CONFIG_CPU_SH4
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
extern inline pte_t ptep_get_and_clear(pte_t *ptep);
#endif
#include <asm-generic/pgtable.h>
#endif /* __ASM_SH_PAGE_H */