Commit fc72f345 authored by Jeff Dike

Added highmem support.

The UML initialization code marks memory that doesn't fit in the
kernel's address space as highmem, and later sets up the UML data
structures for it, and frees that memory to the mm system as highmem.
parent f20bf018
...@@ -32,6 +32,7 @@ bool 'Symmetric multi-processing support' CONFIG_UML_SMP ...@@ -32,6 +32,7 @@ bool 'Symmetric multi-processing support' CONFIG_UML_SMP
define_bool CONFIG_SMP $CONFIG_UML_SMP define_bool CONFIG_SMP $CONFIG_UML_SMP
int 'Nesting level' CONFIG_NEST_LEVEL 0 int 'Nesting level' CONFIG_NEST_LEVEL 0
int 'Kernel address space size (in .5G units)' CONFIG_KERNEL_HALF_GIGS 1 int 'Kernel address space size (in .5G units)' CONFIG_KERNEL_HALF_GIGS 1
bool 'Highmem support' CONFIG_HIGHMEM
endmenu endmenu
mainmenu_option next_comment mainmenu_option next_comment
......
...@@ -51,8 +51,8 @@ extern unsigned long task_size; ...@@ -51,8 +51,8 @@ extern unsigned long task_size;
extern int init_mem_user(void); extern int init_mem_user(void);
extern int create_mem_file(unsigned long len); extern int create_mem_file(unsigned long len);
extern void setup_range(int fd, char *driver, unsigned long start, extern void setup_range(int fd, char *driver, unsigned long start,
unsigned long total, struct mem_region *region, unsigned long total, int need_vm,
void *reserved); struct mem_region *region, void *reserved);
extern void map(unsigned long virt, unsigned long p, unsigned long len, extern void map(unsigned long virt, unsigned long p, unsigned long len,
int r, int w, int x); int r, int w, int x);
extern int unmap(void *addr, int len); extern int unmap(void *addr, int len);
...@@ -62,7 +62,7 @@ extern void setup_memory(void *entry); ...@@ -62,7 +62,7 @@ extern void setup_memory(void *entry);
extern unsigned long find_iomem(char *driver, unsigned long *len_out); extern unsigned long find_iomem(char *driver, unsigned long *len_out);
extern int init_maps(struct mem_region *region); extern int init_maps(struct mem_region *region);
extern int nregions(void); extern int nregions(void);
extern void setup_one_range(int n, int fd, char *driver, unsigned long start, extern void init_range(int n, int fd, char *driver, unsigned long start,
unsigned long len, struct mem_region *region); unsigned long len, struct mem_region *region);
extern int reserve_vm(unsigned long start, unsigned long end, void *e); extern int reserve_vm(unsigned long start, unsigned long end, void *e);
extern unsigned long get_vm(unsigned long len); extern unsigned long get_vm(unsigned long len);
......
...@@ -27,6 +27,7 @@ extern unsigned long uml_physmem; ...@@ -27,6 +27,7 @@ extern unsigned long uml_physmem;
extern unsigned long uml_reserved; extern unsigned long uml_reserved;
extern unsigned long end_vm; extern unsigned long end_vm;
extern unsigned long start_vm; extern unsigned long start_vm;
extern unsigned long highmem;
extern int tracing_pid; extern int tracing_pid;
extern int honeypot; extern int honeypot;
......
/* /*
* Copyright (C) 2000 Jeff Dike (jdike@karaya.com) * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL * Licensed under the GPL
*/ */
...@@ -27,13 +27,13 @@ ...@@ -27,13 +27,13 @@
#include "init.h" #include "init.h"
unsigned long high_physmem; unsigned long high_physmem;
unsigned long low_physmem; unsigned long low_physmem;
unsigned long vm_start; unsigned long vm_start;
unsigned long vm_end; unsigned long vm_end;
unsigned long highmem;
pgd_t swapper_pg_dir[1024]; pgd_t swapper_pg_dir[1024];
unsigned long *empty_zero_page = NULL; unsigned long *empty_zero_page = NULL;
...@@ -71,6 +71,9 @@ void mem_init(void) ...@@ -71,6 +71,9 @@ void mem_init(void)
{ {
unsigned long start; unsigned long start;
#ifdef CONFIG_HIGHMEM
highmem_start_page = phys_page(__pa(high_physmem));
#endif
max_mapnr = num_physpages = max_low_pfn; max_mapnr = num_physpages = max_low_pfn;
/* clear the zero-page */ /* clear the zero-page */
...@@ -93,16 +96,168 @@ void mem_init(void) ...@@ -93,16 +96,168 @@ void mem_init(void)
} }
/* this will put all low memory onto the freelists */ /* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem(); totalram_pages = free_all_bootmem();
totalram_pages += highmem >> PAGE_SHIFT;
printk(KERN_INFO "Memory: %luk available\n", printk(KERN_INFO "Memory: %luk available\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10)); (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
kmalloc_ok = 1; kmalloc_ok = 1;
} }
#ifdef CONFIG_HIGHMEM
/* Use #ifdef, matching the other CONFIG_HIGHMEM guards in this file
 * (and avoiding an undefined-macro #if when highmem is disabled).
 */

/* Cached pte pointer and protection bits for the atomic-kmap fixmap
 * slots; set once at boot by kmap_init().
 */
pte_t *kmap_pte;
pgprot_t kmap_prot;

/* Walk swapper_pg_dir to find the kernel pte mapping 'vaddr'. */
#define kmap_get_fixmap_pte(vaddr) \
	pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

/*
 * Cache the pte of the first atomic-kmap slot so the kmap_atomic()
 * fast path doesn't have to walk the page tables on every mapping.
 * Must run after the fixmap page tables exist (see fixrange_init()).
 */
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

#endif /* CONFIG_HIGHMEM */
/*
 * Allocate and wire up the page-table structure (pmd entries and pte
 * pages) covering the kernel virtual range [start, end) under pgd_base.
 * Only the tables are created here; no translations are installed -
 * the callers (set_fixmap() and the pkmap setup) fill in ptes later.
 */
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		/* Two-level paging: the pmd is folded into the pgd. */
		pmd = (pmd_t *)pgd;
		/* NOTE(review): inner loop tests vaddr != end while the
		 * outer tests vaddr < end - appears to assume 'end' is
		 * PMD-aligned; confirm against callers.
		 */
		for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
			if (pmd_none(*pmd)) {
				/* Allocate a zeroed pte page and hook it in;
				 * BUG if the pmd doesn't now point at it.
				 */
				pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
				set_pmd(pmd, __pmd(_KERNPG_TABLE +
						   (unsigned long) pte));
				if (pte != pte_offset(pmd, 0))
					BUG();
			}
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}
/*
 * Ensure 'region' has a mem_map array with one struct page per page of
 * the region.  The main physmem region shares the global mem_map; other
 * regions get a freshly allocated array whose entries are reserved and
 * zero-counted.  Returns 0 on success, -ENOMEM if no array could be
 * allocated.
 */
int init_maps(struct mem_region *region)
{
	struct page *pages;
	int npages, bytes, idx;

	/* The primary physical-memory region reuses the global mem_map. */
	if(region == &physmem_region){
		region->mem_map = mem_map;
		return(0);
	}

	/* Already set up - nothing to do. */
	if(region->mem_map != NULL)
		return(0);

	npages = region->len >> PAGE_SHIFT;
	bytes = npages * sizeof(struct page);

	/* Before kmalloc is usable we must fall back to bootmem. */
	if(kmalloc_ok){
		pages = kmalloc(bytes, GFP_KERNEL);
		if(pages == NULL)
			pages = vmalloc(bytes);
	}
	else pages = alloc_bootmem_low_pages(bytes);

	if(pages == NULL)
		return(-ENOMEM);

	for(idx = 0; idx < npages; idx++){
		struct page *page = &pages[idx];

		set_page_count(page, 0);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->list);
	}

	region->mem_map = pages;
	return(0);
}
/*
 * Find a free slot in the global regions[] table and initialize it via
 * init_range().  Returns the slot index, or -1 if the table is full.
 */
static int setup_one_range(int fd, char *driver, unsigned long start, int len,
		    struct mem_region *region)
{
	int i;

	for(i = 0; i < NREGIONS; i++){
		if(regions[i] == NULL) break;
	}
	if(i == NREGIONS){
		/* Fixed: message used to say "setup_range", which is a
		 * different function - name the real failure site.
		 */
		printk("setup_one_range : no free regions\n");
		return(-1);
	}
	init_range(i, fd, driver, start, len, region);
	return(i);
}
#ifdef CONFIG_HIGHMEM
/*
 * Build the kernel page tables for the permanent kmap area
 * (PKMAP_BASE .. PKMAP_BASE + LAST_PKMAP pages) and initialize the
 * atomic-kmap machinery.  Called once from paging_init() before any
 * highmem page is mapped.
 */
static void init_highmem(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

	/* Cache the pte page backing the pkmap area for kmap()/kunmap().
	 * NOTE(review): assumes the whole pkmap range is covered by a
	 * single pte page (LAST_PKMAP <= PTRS_PER_PTE) - confirm.
	 */
	pgd = swapper_pg_dir + __pgd_offset(vaddr);
	pmd = pmd_offset(pgd, vaddr);
	pte = pte_offset(pmd, vaddr);
	pkmap_page_table = pte;

	kmap_init();
}
/*
 * Release 'len' bytes of highmem (physical memory beyond the kernel's
 * low mapping, starting at physical offset physmem_size) to the page
 * allocator.  The memory is carved into REGION_SIZE chunks, each
 * registered in regions[]; every page is marked PG_highmem and freed.
 * On failure to claim a region slot the remaining highmem is silently
 * dropped.
 */
void setup_highmem(unsigned long len)
{
	struct mem_region *region;
	struct page *page, *map;
	unsigned long phys, cur;
	int index, pages, i;

	phys = physmem_size;
	do {
		/* 'cur' kept unsigned long: it holds the min() of two
		 * unsigned longs (the old int declaration could truncate).
		 */
		cur = min(len, (unsigned long) REGION_SIZE);
		index = setup_one_range(-1, NULL, -1, cur, NULL);
		if(index == -1){
			printk("setup_highmem - setup_one_range failed\n");
			return;
		}
		region = regions[index];

		/* The struct pages for this chunk live in the global
		 * mem_map, directly after the low-memory entries.
		 */
		region->mem_map = &mem_map[phys / PAGE_SIZE];
		map = region->mem_map;

		/* Mark each page highmem and hand it to the allocator.
		 * (Separate counters - the original reused 'i' for both
		 * the region index and this loop.)
		 */
		pages = cur >> PAGE_SHIFT;
		for(i = 0; i < pages; i++){
			page = &map[i];
			ClearPageReserved(page);
			set_bit(PG_highmem, &page->flags);
			atomic_set(&page->count, 1);
			__free_page(page);
		}
		phys += cur;
		len -= cur;
	} while(len > 0);
}
#endif
void paging_init(void) void paging_init(void)
{ {
struct mem_region *region; struct mem_region *region;
unsigned long zones_size[MAX_NR_ZONES], start, end; unsigned long zones_size[MAX_NR_ZONES], start, end, vaddr;
int i, index; int i, index;
empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE); empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
...@@ -111,6 +266,7 @@ void paging_init(void) ...@@ -111,6 +266,7 @@ void paging_init(void)
zones_size[i] = 0; zones_size[i] = 0;
zones_size[0] = (high_physmem >> PAGE_SHIFT) - zones_size[0] = (high_physmem >> PAGE_SHIFT) -
(uml_physmem >> PAGE_SHIFT); (uml_physmem >> PAGE_SHIFT);
zones_size[2] = highmem >> PAGE_SHIFT;
free_area_init(zones_size); free_area_init(zones_size);
start = phys_region_index(__pa(uml_physmem)); start = phys_region_index(__pa(uml_physmem));
end = phys_region_index(__pa(high_physmem - 1)); end = phys_region_index(__pa(high_physmem - 1));
...@@ -120,6 +276,18 @@ void paging_init(void) ...@@ -120,6 +276,18 @@ void paging_init(void)
region->mem_map = &mem_map[index]; region->mem_map = &mem_map[index];
if(i > start) free_bootmem(__pa(region->start), region->len); if(i > start) free_bootmem(__pa(region->start), region->len);
} }
/*
* Fixed mappings, only the page table structure has to be
* created - mappings will be set by set_fixmap():
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);
#if CONFIG_HIGHMEM
init_highmem();
setup_highmem(highmem);
#endif
} }
pte_t __bad_page(void) pte_t __bad_page(void)
...@@ -220,6 +388,8 @@ struct page *arch_validate(struct page *page, int mask, int order) ...@@ -220,6 +388,8 @@ struct page *arch_validate(struct page *page, int mask, int order)
again: again:
if(page == NULL) return(page); if(page == NULL) return(page);
if(PageHighMem(page)) return(page);
addr = (unsigned long) page_address(page); addr = (unsigned long) page_address(page);
for(i = 0; i < (1 << order); i++){ for(i = 0; i < (1 << order); i++){
current->thread.fault_addr = (void *) addr; current->thread.fault_addr = (void *) addr;
...@@ -315,56 +485,22 @@ int nregions(void) ...@@ -315,56 +485,22 @@ int nregions(void)
return(NREGIONS); return(NREGIONS);
} }
int init_maps(struct mem_region *region) void setup_range(int fd, char *driver, unsigned long start, unsigned long len,
{ int need_vm, struct mem_region *region, void *reserved)
struct page *p, *map;
int i, n;
if(region == &physmem_region){
region->mem_map = mem_map;
return(0);
}
else if(region->mem_map != NULL) return(0);
n = region->len >> PAGE_SHIFT;
map = kmalloc(n * sizeof(struct page), GFP_KERNEL);
if(map == NULL) map = vmalloc(n * sizeof(struct page));
if(map == NULL)
return(-ENOMEM);
for(i = 0; i < n; i++){
p = &map[i];
set_page_count(p, 0);
SetPageReserved(p);
INIT_LIST_HEAD(&p->list);
}
region->mem_map = map;
return(0);
}
void setup_range(int fd, char *driver, unsigned long start,
unsigned long len, struct mem_region *region, void *reserved)
{ {
int i, incr; int i, cur;
i = 0;
do { do {
for(; i < NREGIONS; i++){ cur = min(len, (unsigned long) REGION_SIZE);
if(regions[i] == NULL) break; i = setup_one_range(fd, driver, start, cur, region);
}
if(i == NREGIONS){
printk("setup_range : no free regions\n");
return;
}
setup_one_range(i, fd, driver, start, len, region);
region = regions[i]; region = regions[i];
if(setup_region(region, reserved)){ if(need_vm && setup_region(region, reserved)){
kfree(region); kfree(region);
regions[i] = NULL; regions[i] = NULL;
return; return;
} }
incr = min(len, (unsigned long) REGION_SIZE); start += cur;
start += incr; len -= cur;
len -= incr;
} while(len > 0); } while(len > 0);
} }
...@@ -399,7 +535,7 @@ int setup_iomem(void) ...@@ -399,7 +535,7 @@ int setup_iomem(void)
for(i = 0; i < num_iomem_regions; i++){ for(i = 0; i < num_iomem_regions; i++){
iomem = &iomem_regions[i]; iomem = &iomem_regions[i];
setup_range(iomem->fd, iomem->name, -1, iomem->size, NULL, setup_range(iomem->fd, iomem->name, -1, iomem->size, 1, NULL,
NULL); NULL);
} }
return(0); return(0);
...@@ -430,7 +566,7 @@ void setup_physmem(unsigned long start, unsigned long reserve_end, ...@@ -430,7 +566,7 @@ void setup_physmem(unsigned long start, unsigned long reserve_end,
if((region == NULL) || (reserved == NULL)) if((region == NULL) || (reserved == NULL))
panic("Couldn't allocate physmem region or vm " panic("Couldn't allocate physmem region or vm "
"reservation\n"); "reservation\n");
setup_range(-1, NULL, start, cur, region, reserved); setup_range(-1, NULL, start, cur, 1, region, reserved);
if(do_free){ if(do_free){
unsigned long reserve = reserve_end - start; unsigned long reserve = reserve_end - start;
...@@ -535,7 +671,7 @@ struct page *phys_to_page(unsigned long phys) ...@@ -535,7 +671,7 @@ struct page *phys_to_page(unsigned long phys)
return(mem_map + (phys_offset(phys) >> PAGE_SHIFT)); return(mem_map + (phys_offset(phys) >> PAGE_SHIFT));
} }
int setup_mem_maps(void) static int setup_mem_maps(void)
{ {
struct mem_region *region; struct mem_region *region;
int i; int i;
......
...@@ -77,7 +77,7 @@ int create_mem_file(unsigned long len) ...@@ -77,7 +77,7 @@ int create_mem_file(unsigned long len)
return(fd); return(fd);
} }
void setup_one_range(int n, int fd, char *driver, unsigned long start, void init_range(int n, int fd, char *driver, unsigned long start,
unsigned long len, struct mem_region *region) unsigned long len, struct mem_region *region)
{ {
if(fd == -1) if(fd == -1)
......
...@@ -528,7 +528,7 @@ unsigned long um_virt_to_phys(void *t, unsigned long addr) ...@@ -528,7 +528,7 @@ unsigned long um_virt_to_phys(void *t, unsigned long addr)
char *current_cmd(void) char *current_cmd(void)
{ {
#ifdef CONFIG_SMP #if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
return("(Unknown)"); return("(Unknown)");
#else #else
unsigned long addr = um_virt_to_phys(current, current->mm->arg_start); unsigned long addr = um_virt_to_phys(current, current->mm->arg_start);
......
...@@ -178,6 +178,11 @@ void flush_tlb_kernel_vm(void) ...@@ -178,6 +178,11 @@ void flush_tlb_kernel_vm(void)
flush_tlb_kernel_range(start_vm, end_vm); flush_tlb_kernel_range(start_vm, end_vm);
} }
/* Flush the TLB entry covering the single kernel page at 'addr'. */
void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end) unsigned long end)
{ {
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
*/ */
#include "linux/config.h" #include "linux/config.h"
#include "linux/kernel.h"
#include "linux/sched.h" #include "linux/sched.h"
#include "linux/notifier.h" #include "linux/notifier.h"
#include "linux/mm.h" #include "linux/mm.h"
...@@ -109,8 +110,6 @@ static int start_kernel_proc(void *unused) ...@@ -109,8 +110,6 @@ static int start_kernel_proc(void *unused)
return(0); return(0);
} }
extern unsigned long high_physmem;
#ifdef CONFIG_HOST_2G_2G #ifdef CONFIG_HOST_2G_2G
#define TOP 0x80000000 #define TOP 0x80000000
#else #else
...@@ -263,10 +262,12 @@ unsigned long brk_start; ...@@ -263,10 +262,12 @@ unsigned long brk_start;
static struct vm_reserved kernel_vm_reserved; static struct vm_reserved kernel_vm_reserved;
#define MIN_VMALLOC (32 * 1024 * 1024)
int linux_main(int argc, char **argv) int linux_main(int argc, char **argv)
{ {
unsigned long avail; unsigned long avail;
unsigned long virtmem_size; unsigned long virtmem_size, max_physmem;
unsigned int i, add, err; unsigned int i, add, err;
void *sp; void *sp;
...@@ -278,7 +279,7 @@ int linux_main(int argc, char **argv) ...@@ -278,7 +279,7 @@ int linux_main(int argc, char **argv)
} }
if(have_root == 0) add_arg(saved_command_line, DEFAULT_COMMAND_LINE); if(have_root == 0) add_arg(saved_command_line, DEFAULT_COMMAND_LINE);
if(!jail) if(!jail || debug)
remap_data(ROUND_DOWN(&_stext), ROUND_UP(&_etext), 1); remap_data(ROUND_DOWN(&_stext), ROUND_UP(&_etext), 1);
remap_data(ROUND_DOWN(&_sdata), ROUND_UP(&_edata), 1); remap_data(ROUND_DOWN(&_sdata), ROUND_UP(&_edata), 1);
brk_start = (unsigned long) sbrk(0); brk_start = (unsigned long) sbrk(0);
...@@ -295,20 +296,20 @@ int linux_main(int argc, char **argv) ...@@ -295,20 +296,20 @@ int linux_main(int argc, char **argv)
argv1_end = &argv[1][strlen(argv[1])]; argv1_end = &argv[1][strlen(argv[1])];
set_usable_vm(uml_physmem, get_kmem_end()); set_usable_vm(uml_physmem, get_kmem_end());
highmem = 0;
max_physmem = get_kmem_end() - uml_physmem - MIN_VMALLOC;
if(physmem_size > max_physmem){
highmem = physmem_size - max_physmem;
physmem_size -= highmem;
}
high_physmem = uml_physmem + physmem_size; high_physmem = uml_physmem + physmem_size;
high_memory = (void *) high_physmem; high_memory = (void *) high_physmem;
setup_physmem(uml_physmem, uml_reserved, physmem_size);
/* Kernel vm starts after physical memory and is either the size
* of physical memory or the remaining space left in the kernel
* area of the address space, whichever is smaller.
*/
start_vm = VMALLOC_START; start_vm = VMALLOC_START;
if(start_vm >= get_kmem_end())
panic("Physical memory too large to allow any kernel "
"virtual memory");
setup_physmem(uml_physmem, uml_reserved, physmem_size);
virtmem_size = physmem_size; virtmem_size = physmem_size;
avail = get_kmem_end() - start_vm; avail = get_kmem_end() - start_vm;
if(physmem_size > avail) virtmem_size = avail; if(physmem_size > avail) virtmem_size = avail;
......
#ifndef __UM_FIXMAP_H
#define __UM_FIXMAP_H
#include <linux/config.h>
#include <asm/kmap_types.h>
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of virtual memory (0xfffff000) backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
 * These 'compile-time allocated' memory buffers are
 * fixed-size 4k pages (or larger if used with an increment
 * higher than 1). Use fixmap_set(idx, phys) to associate
 * physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
/*
 * On UP, currently we will have no trace of the fixmap mechanism -
 * no page table allocations, etc. This might change in the
 * future; say, framebuffers for the console driver(s) could be
 * fix-mapped?
*/
enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
__end_of_fixed_addresses
};
extern void __set_fixmap (enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
/*
* Some hardware wants to get fixmapped without caching.
*/
#define set_fixmap_nocache(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
/*
* used by vmalloc.c.
*
* Leave one empty page between vmalloc'ed areas and
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
extern unsigned long get_kmem_end(void);
#define FIXADDR_TOP (get_kmem_end() - 0x2000)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
extern void __this_fixmap_does_not_exist(void);
/*
 * 'index to address' translation. If anyone tries to use the idx
 * directly without translation, we catch the bug with a NULL-dereference
 * kernel oops. Illegal ranges of incoming indices are caught too.
*/
/* Return the kernel virtual address of fixmap slot 'idx'.  An
 * out-of-range constant 'idx' is turned into a link-time error via the
 * deliberately undefined __this_fixmap_does_not_exist().
 */
static inline unsigned long fix_to_virt(const unsigned int idx)
{
	/*
	 * this branch gets completely eliminated after inlining,
	 * except when someone tries to use fixaddr indices in an
	 * illegal way. (such as mixing up address types or using
	 * out-of-range indices).
	 *
	 * If it doesn't get removed, the linker will complain
	 * loudly with a reasonably clear error message..
	 */
	if (idx >= __end_of_fixed_addresses)
		__this_fixmap_does_not_exist();

	return __fix_to_virt(idx);
}
#endif
#ifndef __UM_HIGHMEM_H #ifndef __UM_HIGHMEM_H
#define __UM_HIGHMEM_H #define __UM_HIGHMEM_H
#include "asm/page.h"
#include "asm/fixmap.h"
#include "asm/arch/highmem.h" #include "asm/arch/highmem.h"
#undef PKMAP_BASE
#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#endif #endif
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#define __UM_PGALLOC_H #define __UM_PGALLOC_H
#include "linux/mm.h" #include "linux/mm.h"
#include "asm/fixmap.h"
#define pmd_populate_kernel(mm, pmd, pte) \ #define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte))) set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
......
...@@ -62,12 +62,16 @@ extern unsigned long *empty_zero_page; ...@@ -62,12 +62,16 @@ extern unsigned long *empty_zero_page;
*/ */
extern unsigned long high_physmem; extern unsigned long high_physmem;
extern unsigned long end_vm;
#define VMALLOC_OFFSET (__va_space) #define VMALLOC_OFFSET (__va_space)
#define VMALLOC_START (((unsigned long) high_physmem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_START (((unsigned long) high_physmem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x)) #define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END (end_vm)
#if CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif
#define _PAGE_PRESENT 0x001 #define _PAGE_PRESENT 0x001
#define _PAGE_NEWPAGE 0x002 #define _PAGE_NEWPAGE 0x002
...@@ -333,6 +337,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) ...@@ -333,6 +337,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
/* to find an entry in a page-table-directory. */ /* to find an entry in a page-table-directory. */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
/* to find an entry in a page-table-directory */ /* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) \ #define pgd_offset(mm, address) \
...@@ -341,6 +346,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) ...@@ -341,6 +346,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
/* to find an entry in a kernel page-table-directory */ /* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address) #define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define __pmd_offset(address) \
(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* Find an entry in the second-level page table.. */ /* Find an entry in the second-level page table.. */
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{ {
......
...@@ -27,6 +27,7 @@ extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, ...@@ -27,6 +27,7 @@ extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_kernel_vm(void); extern void flush_tlb_kernel_vm(void);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void __flush_tlb_one(unsigned long addr);
static inline void flush_tlb_pgtables(struct mm_struct *mm, static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment