#ifndef _PPC64_PGTABLE_H
#define _PPC64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifndef __ASSEMBLY__
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#endif /* __ASSEMBLY__ */

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3) + (PAGE_SHIFT - 2))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level.  The PTE level must use a 64b record
 * for each page table entry.  The PMD and PGD levels can use a 32b record
 * per entry because the tables they point to are page aligned, so only
 * the page number needs to be stored.
 */
#define PTE_INDEX_SIZE  9
#define PMD_INDEX_SIZE  10
#define PGD_INDEX_SIZE  10

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

#define USER_PTRS_PER_PGD	(1024)
#define FIRST_USER_PGD_NR	0

#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
                    PGD_INDEX_SIZE + PAGE_SHIFT) 
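/*
 * Worked example of the resulting geometry (assuming the 4K PAGE_SIZE,
 * i.e. PAGE_SHIFT == 12, that these definitions are written for):
 *
 *	PMD_SHIFT   = 12 + 12 - 3      = 21, so one PMD entry maps 2MB
 *	PGDIR_SHIFT = 12 + 9 + 10      = 31, so one PGD entry maps 2GB
 *	PTRS_PER_PTE = 512, PTRS_PER_PMD = PTRS_PER_PGD = 1024
 *	EADDR_SIZE  = 9 + 10 + 10 + 12 = 41 bits of effective address
 *		decoded by the tree, i.e. a 2TB region
 */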

/*
 * Define the address range of the vmalloc VM area.
 */
#define VMALLOC_START (0xD000000000000000)
#define VMALLOC_END   (VMALLOC_START + VALID_EA_BITS)

/*
 * Define the address range of the imalloc VM area.
 * (used for ioremap)
 */
#define IMALLOC_START (ioremap_bot)
#define IMALLOC_VMADDR(x) ((unsigned long)(x))
#define IMALLOC_BASE  (0xE000000000000000)
#define IMALLOC_END   (IMALLOC_BASE + VALID_EA_BITS)

/*
 * Define the address range mapped virt <-> physical
 */
#define KRANGE_START KERNELBASE
#define KRANGE_END   (KRANGE_START + VALID_EA_BITS)

/*
 * Define the user address range
 */
#define USER_START (0UL)
#define USER_END   (USER_START + VALID_EA_BITS)


/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#define _PAGE_PRESENT	0x001UL	/* software: pte contains a translation */
#define _PAGE_USER	0x002UL	/* matches one of the PP bits */
#define _PAGE_RW	0x004UL	/* software: user write access allowed */
#define _PAGE_GUARDED	0x008UL
#define _PAGE_COHERENT	0x010UL	/* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x020UL	/* I: cache inhibit */
#define _PAGE_WRITETHRU	0x040UL	/* W: cache write-through */
#define _PAGE_DIRTY	0x080UL	/* C: page changed */
#define _PAGE_ACCESSED	0x100UL	/* R: page referenced */
#define _PAGE_FILE	0x200UL /* software: pte holds file offset */
#define _PAGE_HASHPTE	0x400UL	/* software: pte has an associated HPTE */
#define _PAGE_EXEC	0x800UL	/* software: i-cache coherence required */
#define _PAGE_SECONDARY 0x8000UL /* software: HPTE is in secondary group */
#define _PAGE_GROUP_IX  0x7000UL /* software: HPTE index within group */
/* Bits 0x7000 identify the index within an HPT Group */
#define _PAGE_HPTEFLAGS (_PAGE_HASHPTE | _PAGE_SECONDARY | _PAGE_GROUP_IX)
/* PAGE_MASK gives the right answer below, but only by accident */
/* It should be preserving the high 48 bits and then specifically */
/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HPTEFLAGS)

#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)

/* __pgprot defined in asm-ppc64/page.h */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
			       _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X
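/*
 * Worked example of the tables above (illustrative only): a private
 * PROT_READ|PROT_WRITE mapping indexes __P011 and so gets PAGE_COPY_X,
 * i.e. _PAGE_BASE | _PAGE_USER | _PAGE_EXEC == 0x913 with _PAGE_RW
 * clear, so the first store faults and is handled as copy-on-write.
 * The shared equivalent (__S011 -> PAGE_SHARED_X) has _PAGE_RW set and
 * writes go straight through.
 */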

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* __ASSEMBLY__ */

/* shift to put page number into pte */
#define PTE_SHIFT (16)

/* We allow 2^41 bytes of real memory, so we need 29 bits in the PMD
 * to give the PTE page number.  The bottom two bits are for flags. */
#define PMD_TO_PTEPAGE_SHIFT (2)

#ifdef CONFIG_HUGETLB_PAGE
#define _PMD_HUGEPAGE	0x00000001U
#define HUGEPTE_BATCH_SIZE (1<<(HPAGE_SHIFT-PMD_SHIFT))

int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local);

#define HAVE_ARCH_UNMAPPED_AREA
#else

#define hash_huge_page(mm,a,ea,vsid,local)	-1
#define _PMD_HUGEPAGE	0

#endif
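/*
 * Note on the arithmetic (assuming the 16MB huge pages ppc64 uses, i.e.
 * HPAGE_SHIFT == 24, and PMD_SHIFT == 21 from above): HUGEPTE_BATCH_SIZE
 * is 1 << (24 - 21) = 8, i.e. one huge page covers eight consecutive
 * pmd slots.
 */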

#ifndef __ASSEMBLY__

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * mk_pte takes a (struct page *) as input
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#define pfn_pte(pfn,pgprot)						\
({									\
	pte_t pte;							\
	pte_val(pte) = ((unsigned long)(pfn) << PTE_SHIFT) |   		\
                        pgprot_val(pgprot);				\
	pte;								\
})
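/*
 * Worked example (illustrative only): with PTE_SHIFT == 16 and
 * PAGE_KERNEL == _PAGE_BASE | _PAGE_WRENABLE == 0x195,
 *
 *	pfn_pte(0x1234, PAGE_KERNEL)
 *
 * yields a pte whose value is (0x1234 << 16) | 0x195 == 0x12340195:
 * the page frame number in the high bits, the protection/status flags
 * in the low 16 bits.
 */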

#define pte_modify(_pte, newprot) \
  (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))

#define pte_none(pte)		((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/* pte_clear moved to later in this file */

#define pte_pfn(x)		((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pmd_set(pmdp, ptep) 	\
	(pmd_val(*(pmdp)) = (__ba_to_bpn(ptep) << PMD_TO_PTEPAGE_SHIFT))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_hugepage(pmd)	(!!(pmd_val(pmd) & _PMD_HUGEPAGE))
#define	pmd_bad(pmd)		(((pmd_val(pmd)) == 0) || pmd_hugepage(pmd))
#define	pmd_present(pmd)	((!pmd_hugepage(pmd)) \
				 && (pmd_val(pmd) & ~_PMD_HUGEPAGE) != 0)
#define	pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_kernel(pmd)	\
	(__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT))
#define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))
#define pgd_set(pgdp, pmdp)	(pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp)))
#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		((pgd_val(pgd)) == 0)
#define pgd_present(pgd)	(pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0UL)
#define pgd_page(pgd)		(__bpn_to_ba(pgd_val(pgd))) 

/* 
 * Find an entry in a page-table-directory.  We combine the address region 
 * (the high order N bits) and the pgd portion of the address.
 */
/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x7ff)

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
  ((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/* Find an entry in the third-level page table.. */
#define pte_offset_kernel(dir,addr) \
  ((pte_t *) pmd_page_kernel(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)
#define pte_unmap_nested(pte)		do { } while(0)
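/*
 * Putting the lookup macros together -- an illustrative sketch (not part
 * of the interface) of finding the linux pte for a mapped address,
 * assuming struct mm_struct is complete at the point of use:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd;
 *	pte_t *pte = NULL;
 *
 *	if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
 *		pmd = pmd_offset(pgd, addr);
 *		if (pmd_present(*pmd))
 *			pte = pte_offset_kernel(pmd, addr);
 *	}
 *
 * Each level just indexes the page named by the previous level's entry
 * with the corresponding slice of the effective address.
 */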

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in the ioremap page-table-directory */
#define pgd_offset_i(address) (ioremap_pgd + pgd_index(address))

#define pages_to_mb(x)		((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER;}
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC;}
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/* Atomic PTE updates */
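/*
 * pte_update() below is the primitive behind the ptep_* operations: the
 * ldarx/stdcx. pair forms an atomic read-modify-write on the pte (andc
 * applies the clear mask, or applies the set mask, and the store is
 * retried if another CPU touched the pte in between).  The value
 * returned is the pte as it was before the update, so callers can test
 * bits the update just cleared, e.g. (illustrative only):
 *
 *	old = pte_update(ptep, _PAGE_DIRTY, 0);
 *	was_dirty = (old & _PAGE_DIRTY) != 0;
 */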

static inline unsigned long pte_update( pte_t *p, unsigned long clr,
					unsigned long set )
{
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%5 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
	return old;
}

static inline int ptep_test_and_clear_young(pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_DIRTY, 0) & _PAGE_DIRTY) != 0;
}

static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HPTEFLAGS, 0));
}

static inline void ptep_set_wrprotect(pte_t *ptep)
{
	pte_update(ptep, _PAGE_RW, 0);
}

static inline void ptep_mkdirty(pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

/*
 * set_pte stores a linux PTE into the linux page table.
 * On machines which use an MMU hash table we avoid changing the
 * _PAGE_HASHPTE bit.
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	pte_update(ptep, ~_PAGE_HPTEFLAGS, pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

static inline void pte_clear(pte_t * ptep)
{
	pte_update(ptep, ~_PAGE_HPTEFLAGS, 0);
}

extern unsigned long ioremap_bot, ioremap_base;

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))

extern pgd_t swapper_pg_dir[1024];
extern pgd_t ioremap_dir[1024];

extern void paging_init(void);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> PTE_SHIFT })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_SHIFT })
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_SHIFT)
#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)
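/*
 * Worked example of the encoding above (illustrative only): a swap entry
 * keeps a 6-bit type at bits 1-6 and the offset from bit 8, so
 * __swp_entry(3, 0x10) has val (3 << 1) | (0x10 << 8) == 0x1006.  Stored
 * as a pte it is shifted up by PTE_SHIFT, giving 0x10060000, which keeps
 * the low 16 flag bits (including _PAGE_PRESENT and _PAGE_HASHPTE)
 * clear.  File ptes likewise keep the offset above PTE_SHIFT but set
 * _PAGE_FILE, leaving PTE_FILE_MAX_BITS == 64 - 16 == 48 bits of offset.
 */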

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 * The only use is in fs/ncpfs/dir.c
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range remap_page_range 

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern void hpte_init_pSeries(void);
extern void hpte_init_iSeries(void);

typedef pte_t *pte_addr_t;

long pSeries_lpar_hpte_insert(unsigned long hpte_group,
			      unsigned long va, unsigned long prpn,
			      int secondary, unsigned long hpteflags,
			      int bolted, int large);

long pSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
			 unsigned long prpn, int secondary,
			 unsigned long hpteflags, int bolted, int large);

#endif /* __ASSEMBLY__ */
#endif /* _PPC64_PGTABLE_H */