#ifndef _PPC64_PAGE_H
#define _PPC64_PAGE_H

/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PAGE_OFFSET_MASK (PAGE_SIZE-1)
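/*
 * Worked example (illustrative only): with PAGE_SHIFT = 12,
 *	PAGE_SIZE        == 0x1000 (4 KB)
 *	PAGE_MASK        == 0xfffffffffffff000
 *	PAGE_OFFSET_MASK == 0x0000000000000fff
 * so (addr & PAGE_MASK) is the page base and (addr & PAGE_OFFSET_MASK)
 * is the byte offset within the page.
 */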

#define SID_SHIFT       28
#define SID_MASK        0xfffffffffUL
#define GET_ESID(x)     (((x) >> SID_SHIFT) & SID_MASK)
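/*
 * Worked example (illustrative only): segments are 256 MB (1 << SID_SHIFT),
 * so for the effective address 0xC000000012345678
 *	GET_ESID(0xC000000012345678) == 0xC00000001
 * i.e. the upper 36 bits identify the segment and the low 28 bits are the
 * offset within it.
 */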

/* Define an illegal instr to trap on the bug.
 * We don't use 0 because that marks the end of a function
 * in the ELF ABI.  That's "Boo Boo" in case you wonder...
 */
#define BUG_OPCODE .long 0x00b00b00  /* For asm */
#define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */

#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/naca.h>

#undef STRICT_MM_TYPECHECKS

#define REGION_SIZE   4UL
#define REGION_SHIFT  60UL
#define REGION_MASK   (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
#define REGION_STRIDE (1UL << REGION_SHIFT)
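/*
 * Worked example (illustrative only): the region is the top 4 bits of an
 * effective address, so
 *	REGION_MASK   == 0xf000000000000000
 *	REGION_STRIDE == 0x1000000000000000
 * and an address such as 0xC0000000001E2000 lies in region 0xC, the kernel
 * region defined further down.
 */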

/*
 * Zero a page one L1 D-cache line at a time using dcbz.  The cache line
 * size and lines-per-page counts come from the naca, which is set up early
 * in boot.
 */
static __inline__ void clear_page(void *addr)
{
	unsigned long lines, line_size;

	line_size = naca->dCacheL1LineSize; 
	lines = naca->dCacheL1LinesPerPage;

	__asm__ __volatile__(
"  	mtctr  	%1\n\
1:      dcbz  	0,%0\n\
	add	%0,%0,%3\n\
	bdnz+	1b"
        : "=r" (addr)
        : "r" (lines), "0" (addr), "r" (line_size)
	: "ctr", "memory");
}

extern void copy_page(void *to, void *from);
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p);

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking.  
 * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned int  pmd; } pmd_t;
typedef struct { unsigned int  pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
typedef unsigned int  pmd_t;
typedef unsigned int  pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __pmd(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif
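/*
 * Illustrative sketch (not part of this header): with STRICT_MM_TYPECHECKS
 * the wrapper structs make the compiler reject accidental mixing of page
 * table types, e.g.
 *
 *	pte_t pte = __pte(0x123);
 *	unsigned long raw = pte_val(pte);	OK, explicit conversion
 *	pmd_t pmd = pte;			compile error with the structs,
 *						silently accepted otherwise
 */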

#ifdef CONFIG_XMON
#include <asm/ptrace.h>
extern void xmon(struct pt_regs *excp);
#define BUG() do { \
	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
	xmon(0); \
} while (0)
#else
#define BUG() do { \
	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
	__asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
} while (0)
#endif

#define PAGE_BUG(page) do { BUG(); } while (0)

/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
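/*
 * Worked example (illustrative only): with PAGE_SHIFT = 12,
 *	get_order(1)    == 0	(one page)
 *	get_order(4096) == 0
 *	get_order(4097) == 1	(two pages, rounded up to a power of two)
 *	get_order(8193) == 2
 */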

#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)

#endif /* __ASSEMBLY__ */

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)     _ALIGN_UP(addr,size)

/* to align the pointer to the (next) double word boundary */
#define DOUBLEWORD_ALIGN(addr)	_ALIGN(addr,sizeof(unsigned long))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
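/*
 * Worked example (illustrative only), for size a power of two:
 *	_ALIGN_UP(0x1234, 0x1000)   == 0x2000
 *	_ALIGN_DOWN(0x1234, 0x1000) == 0x1000
 *	DOUBLEWORD_ALIGN(0x1001)    == 0x1008
 *	PAGE_ALIGN(0x1001)          == 0x2000
 */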

#ifdef MODULE
#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
#else
#define __page_aligned \
	__attribute__((__aligned__(PAGE_SIZE), \
		__section__(".data.page_aligned")))
#endif


/* This must match the -Ttext linker address            */
/* Note: tophys & tovirt make assumptions about how     */
/*       KERNELBASE is defined for performance reasons. */
/*       When KERNELBASE moves, those macros may have   */
/*             to change!                               */
#define PAGE_OFFSET     0xC000000000000000
#define KERNELBASE      PAGE_OFFSET
#define VMALLOCBASE     0xD000000000000000
#define IOREGIONBASE    0xE000000000000000
#define EEHREGIONBASE   0xA000000000000000

#define IO_REGION_ID       (IOREGIONBASE>>REGION_SHIFT)
#define EEH_REGION_ID      (EEHREGIONBASE>>REGION_SHIFT)
#define VMALLOC_REGION_ID  (VMALLOCBASE>>REGION_SHIFT)
#define KERNEL_REGION_ID   (KERNELBASE>>REGION_SHIFT)
#define USER_REGION_ID     (0UL)
#define REGION_ID(X)	   (((unsigned long)(X))>>REGION_SHIFT)
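/*
 * Worked example (illustrative only):
 *	REGION_ID(0xC000000000001000) == 0xC == KERNEL_REGION_ID
 *	REGION_ID(0xD000000000002000) == 0xD == VMALLOC_REGION_ID
 *	REGION_ID(0x0000000010000000) == 0x0 == USER_REGION_ID
 */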

/*
 * Define valid/invalid EA bits (for all ranges)
 */
#define VALID_EA_BITS   (0x000001ffffffffffUL)
#define INVALID_EA_BITS (~(REGION_MASK|VALID_EA_BITS))

#define IS_VALID_REGION_ID(x) \
        (((x) == USER_REGION_ID) || ((x) >= KERNEL_REGION_ID))
#define IS_VALID_EA(x) \
        ((!((x) & INVALID_EA_BITS)) && IS_VALID_REGION_ID(REGION_ID(x)))
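/*
 * Worked example (illustrative only): VALID_EA_BITS covers the low 41 bits,
 * so INVALID_EA_BITS == 0x0ffffe0000000000 and
 *	IS_VALID_EA(0xC000000000001000) is true  (kernel region, bits 41-59 clear)
 *	IS_VALID_EA(0xB000000000000000) is false (region 0xB is not valid)
 *	IS_VALID_EA(0xC000100000000000) is false (bits set between the region
 *						  field and the valid EA range)
 */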

#define __bpn_to_ba(x) ((((unsigned long)(x))<<PAGE_SHIFT) + KERNELBASE)
#define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT)

#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))

/* Given that physical addresses do not map 1-1 to absolute addresses, we
 * use these macros to better specify exactly what we want to do.
 * The only restriction on their use is that the absolute address
 * macros cannot be used until after the LMB structure has been
 * initialized in prom.c.  -Peter
 */
#define __v2p(x) ((void *) __pa(x))
#define __v2a(x) ((void *) phys_to_absolute(__pa(x)))
#define __p2a(x) ((void *) phys_to_absolute(x))
#define __p2v(x) ((void *) __va(x))
#define __a2p(x) ((void *) absolute_to_phys(x))
#define __a2v(x) ((void *) __va(absolute_to_phys(x)))
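/*
 * Worked example (illustrative only): for a kernel virtual address
 * kaddr == KERNELBASE + 0x3000,
 *	__pa(kaddr)  == 0x3000			(physical)
 *	__va(0x3000) == KERNELBASE + 0x3000	(back to virtual)
 * The __v2a()/__a2v() forms additionally go through
 * phys_to_absolute()/absolute_to_phys(), so they must not be used before
 * the LMB setup noted above.
 */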

#ifdef CONFIG_DISCONTIGMEM
#define page_to_pfn(page)	discontigmem_page_to_pfn(page)
#define pfn_to_page(pfn)	discontigmem_pfn_to_page(pfn)
#define pfn_valid(pfn)		discontigmem_pfn_valid(pfn)
#else
#define pfn_to_page(pfn)	(mem_map + (pfn))
#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
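/*
 * Worked example (illustrative only, non-discontigmem case): for
 * kaddr == KERNELBASE + 0x5000,
 *	__pa(kaddr) >> PAGE_SHIFT == 5
 *	virt_to_page(kaddr)       == &mem_map[5]
 *	virt_addr_valid(kaddr)    is true provided 5 < max_mapnr
 */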

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* __KERNEL__ */
#endif /* _PPC64_PAGE_H */