diff --git a/arch/um/config.in b/arch/um/config.in index fe6a5283cfcbf90d64a9a0339a881d9cdf5e5656..32cac03d468fd8875b952bd78714c81a71813dae 100644 --- a/arch/um/config.in +++ b/arch/um/config.in @@ -32,6 +32,7 @@ bool 'Symmetric multi-processing support' CONFIG_UML_SMP define_bool CONFIG_SMP $CONFIG_UML_SMP int 'Nesting level' CONFIG_NEST_LEVEL 0 int 'Kernel address space size (in .5G units)' CONFIG_KERNEL_HALF_GIGS 1 +bool 'Highmem support' CONFIG_HIGHMEM endmenu mainmenu_option next_comment diff --git a/arch/um/defconfig b/arch/um/defconfig index bbb14b7d706f2390bbfac97721cf35f7b319367b..89ac76c0e9a8668e9a684dd049a0cb5a39682598 100644 --- a/arch/um/defconfig +++ b/arch/um/defconfig @@ -31,6 +31,7 @@ CONFIG_MAGIC_SYSRQ=y # CONFIG_SMP is not set CONFIG_NEST_LEVEL=0 CONFIG_KERNEL_HALF_GIGS=1 +# CONFIG_HIGHMEM is not set # # Loadable module support diff --git a/arch/um/include/mem_user.h b/arch/um/include/mem_user.h index e3177ae21f9a07cadaacb777453f32a815fcf8e6..4a3e68d56a5a70bf508101ab41397de19f2e024a 100644 --- a/arch/um/include/mem_user.h +++ b/arch/um/include/mem_user.h @@ -34,6 +34,7 @@ struct mem_region { char *driver; + unsigned long start_pfn; unsigned long start; unsigned long len; void *mem_map; @@ -51,8 +52,8 @@ extern unsigned long task_size; extern int init_mem_user(void); extern int create_mem_file(unsigned long len); extern void setup_range(int fd, char *driver, unsigned long start, - unsigned long total, struct mem_region *region, - void *reserved); + unsigned long pfn, unsigned long total, int need_vm, + struct mem_region *region, void *reserved); extern void map(unsigned long virt, unsigned long p, unsigned long len, int r, int w, int x); extern int unmap(void *addr, int len); @@ -62,8 +63,6 @@ extern void setup_memory(void *entry); extern unsigned long find_iomem(char *driver, unsigned long *len_out); extern int init_maps(struct mem_region *region); extern int nregions(void); -extern void setup_one_range(int n, int fd, char *driver, unsigned long start, - unsigned long len, struct mem_region *region); extern int reserve_vm(unsigned long start, unsigned long end, void *e); extern unsigned long get_vm(unsigned long len); extern void setup_physmem(unsigned long start, unsigned long usable, diff --git a/arch/um/include/user_util.h b/arch/um/include/user_util.h index 1aa4b5bb7f56406b68779a8c69c648d78726dbf0..3a93b60baae3059a6e3ca39d66b815b770adcefd 100644 --- a/arch/um/include/user_util.h +++ b/arch/um/include/user_util.h @@ -27,6 +27,7 @@ extern unsigned long uml_physmem; extern unsigned long uml_reserved; extern unsigned long end_vm; extern unsigned long start_vm; +extern unsigned long highmem; extern int tracing_pid; extern int honeypot; diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 794563d95d9e9f7cc8467e03a6584ef448ea8c0b..f84d1cd4d1ec44a53887369a2db4a407be66482e 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) * Licensed under the GPL */ @@ -27,13 +27,13 @@ #include "init.h" unsigned long high_physmem; - unsigned long low_physmem; unsigned long vm_start; - unsigned long vm_end; +unsigned long highmem; + pgd_t swapper_pg_dir[1024]; unsigned long *empty_zero_page = NULL; @@ -71,7 +71,10 @@ void mem_init(void) { unsigned long start; - max_mapnr = num_physpages = max_low_pfn; + max_low_pfn = (high_physmem - uml_physmem) >> PAGE_SHIFT; +#ifdef CONFIG_HIGHMEM + highmem_start_page = phys_page(__pa(high_physmem)); 
+#endif /* clear the zero-page */ memset((void *) empty_zero_page, 0, PAGE_SIZE); @@ -93,16 +96,189 @@ void mem_init(void) } /* this will put all low memory onto the freelists */ - totalram_pages += free_all_bootmem(); + totalram_pages = free_all_bootmem(); + totalhigh_pages = highmem >> PAGE_SHIFT; + totalram_pages += totalhigh_pages; + num_physpages = totalram_pages; + max_mapnr = totalram_pages; + max_pfn = totalram_pages; printk(KERN_INFO "Memory: %luk available\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT-10)); kmalloc_ok = 1; } +#if CONFIG_HIGHMEM +pte_t *kmap_pte; +pgprot_t kmap_prot; + +#define kmap_get_fixmap_pte(vaddr) \ + pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)) + +void __init kmap_init(void) +{ + unsigned long kmap_vstart; + + /* cache the first kmap pte */ + kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); + kmap_pte = kmap_get_fixmap_pte(kmap_vstart); + + kmap_prot = PAGE_KERNEL; +} +#endif /* CONFIG_HIGHMEM */ + +static void __init fixrange_init(unsigned long start, unsigned long end, + pgd_t *pgd_base) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int i, j; + unsigned long vaddr; + + vaddr = start; + i = __pgd_offset(vaddr); + j = __pmd_offset(vaddr); + pgd = pgd_base + i; + + for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) { + pmd = (pmd_t *)pgd; + for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) { + if (pmd_none(*pmd)) { + pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); + set_pmd(pmd, __pmd(_KERNPG_TABLE + + (unsigned long) __pa(pte))); + if (pte != pte_offset_kernel(pmd, 0)) + BUG(); + } + vaddr += PMD_SIZE; + } + j = 0; + } +} + +int init_maps(struct mem_region *region) +{ + struct page *p, *map; + int i, n, len; + + if(region == &physmem_region){ + region->mem_map = mem_map; + return(0); + } + else if(region->mem_map != NULL) return(0); + + n = region->len >> PAGE_SHIFT; + len = n * sizeof(struct page); + if(kmalloc_ok){ + map = kmalloc(len, GFP_KERNEL); + if(map == NULL) map = vmalloc(len); + } + else map = alloc_bootmem_low_pages(len); + + if(map == NULL) + return(-ENOMEM); + for(i = 0; i < n; i++){ + p = &map[i]; + set_page_count(p, 0); + SetPageReserved(p); + INIT_LIST_HEAD(&p->list); + } + region->mem_map = map; + return(0); +} + +static int setup_one_range(int fd, char *driver, unsigned long start, + unsigned long pfn, int len, + struct mem_region *region) +{ + int i; + + for(i = 0; i < NREGIONS; i++){ + if(regions[i] == NULL) break; + } + if(i == NREGIONS){ + printk("setup_range : no free regions\n"); + return(-1); + } + + if(fd == -1) + fd = create_mem_file(len); + + if(region == NULL){ + region = alloc_bootmem_low_pages(sizeof(*region)); + if(region == NULL) + panic("Failed to allocating mem_region"); + } + + *region = ((struct mem_region) { driver : driver, + start_pfn : pfn, + start : start, + len : len, + fd : fd } ); + regions[i] = region; + return(i); +} + +#ifdef CONFIG_HIGHMEM +static void init_highmem(void) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + unsigned long vaddr; + + /* + * Permanent kmaps: + */ + vaddr = PKMAP_BASE; + fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir); + + pgd = swapper_pg_dir + __pgd_offset(vaddr); + pmd = pmd_offset(pgd, vaddr); + pte = pte_offset_kernel(pmd, vaddr); + pkmap_page_table = pte; + + kmap_init(); +} + +void setup_highmem(unsigned long len) +{ + struct mem_region *region; + struct page *page, *map; + unsigned long phys; + int i, cur, index; + + phys = physmem_size; + do { + cur = min(len, (unsigned long) REGION_SIZE); + i = setup_one_range(-1, 
NULL, -1, phys >> PAGE_SHIFT, cur, + NULL); + if(i == -1){ + printk("setup_highmem - setup_one_range failed\n"); + return; + } + region = regions[i]; + index = phys / PAGE_SIZE; + region->mem_map = &mem_map[index]; + + map = region->mem_map; + for(i = 0; i < (cur >> PAGE_SHIFT); i++){ + page = &map[i]; + ClearPageReserved(page); + set_bit(PG_highmem, &page->flags); + atomic_set(&page->count, 1); + __free_page(page); + } + phys += cur; + len -= cur; + } while(len > 0); +} +#endif + void paging_init(void) { struct mem_region *region; - unsigned long zones_size[MAX_NR_ZONES], start, end; + unsigned long zones_size[MAX_NR_ZONES], start, end, vaddr; int i, index; empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE); @@ -111,6 +287,7 @@ void paging_init(void) zones_size[i] = 0; zones_size[0] = (high_physmem >> PAGE_SHIFT) - (uml_physmem >> PAGE_SHIFT); + zones_size[2] = highmem >> PAGE_SHIFT; free_area_init(zones_size); start = phys_region_index(__pa(uml_physmem)); end = phys_region_index(__pa(high_physmem - 1)); @@ -120,6 +297,18 @@ void paging_init(void) region->mem_map = &mem_map[index]; if(i > start) free_bootmem(__pa(region->start), region->len); } + + /* + * Fixed mappings, only the page table structure has to be + * created - mappings will be set by set_fixmap(): + */ + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; + fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir); + +#if CONFIG_HIGHMEM + init_highmem(); + setup_highmem(highmem); +#endif } pte_t __bad_page(void) @@ -220,6 +409,8 @@ struct page *arch_validate(struct page *page, int mask, int order) again: if(page == NULL) return(page); + if(PageHighMem(page)) return(page); + addr = (unsigned long) page_address(page); for(i = 0; i < (1 << order); i++){ current->thread.fault_addr = (void *) addr; @@ -315,56 +506,24 @@ int nregions(void) return(NREGIONS); } -int init_maps(struct mem_region *region) -{ - struct page *p, *map; - int i, n; - - if(region == &physmem_region){ - region->mem_map = mem_map; - return(0); - } - else if(region->mem_map != NULL) return(0); - - n = region->len >> PAGE_SHIFT; - map = kmalloc(n * sizeof(struct page), GFP_KERNEL); - if(map == NULL) map = vmalloc(n * sizeof(struct page)); - if(map == NULL) - return(-ENOMEM); - for(i = 0; i < n; i++){ - p = &map[i]; - set_page_count(p, 0); - SetPageReserved(p); - INIT_LIST_HEAD(&p->list); - } - region->mem_map = map; - return(0); -} - -void setup_range(int fd, char *driver, unsigned long start, - unsigned long len, struct mem_region *region, void *reserved) +void setup_range(int fd, char *driver, unsigned long start, unsigned long pfn, + unsigned long len, int need_vm, struct mem_region *region, + void *reserved) { - int i, incr; + int i, cur; - i = 0; do { - for(; i < NREGIONS; i++){ - if(regions[i] == NULL) break; - } - if(i == NREGIONS){ - printk("setup_range : no free regions\n"); - return; - } - setup_one_range(i, fd, driver, start, len, region); + cur = min(len, (unsigned long) REGION_SIZE); + i = setup_one_range(fd, driver, start, pfn, cur, region); region = regions[i]; - if(setup_region(region, reserved)){ + if(need_vm && setup_region(region, reserved)){ kfree(region); regions[i] = NULL; return; } - incr = min(len, (unsigned long) REGION_SIZE); - start += incr; - len -= incr; + start += cur; + if(pfn != -1) pfn += cur; + len -= cur; } while(len > 0); } @@ -399,8 +558,8 @@ int setup_iomem(void) for(i = 0; i < num_iomem_regions; i++){ iomem = &iomem_regions[i]; - setup_range(iomem->fd, iomem->name, -1, iomem->size, NULL, - NULL); + 
setup_range(iomem->fd, iomem->name, -1, -1, iomem->size, 1, + NULL, NULL); } return(0); } @@ -418,7 +577,7 @@ void setup_physmem(unsigned long start, unsigned long reserve_end, { struct mem_region *region = &physmem_region; struct vm_reserved *reserved = &physmem_reserved; - unsigned long cur; + unsigned long cur, pfn = 0; int do_free = 1, bootmap_size; do { @@ -430,7 +589,7 @@ void setup_physmem(unsigned long start, unsigned long reserve_end, if((region == NULL) || (reserved == NULL)) panic("Couldn't allocate physmem region or vm " "reservation\n"); - setup_range(-1, NULL, start, cur, region, reserved); + setup_range(-1, NULL, start, pfn, cur, 1, region, reserved); if(do_free){ unsigned long reserve = reserve_end - start; @@ -443,6 +602,7 @@ void setup_physmem(unsigned long start, unsigned long reserve_end, do_free = 0; } start += cur; + pfn += cur >> PAGE_SHIFT; len -= cur; region = NULL; reserved = NULL; @@ -492,6 +652,56 @@ struct mem_region *page_region(struct page *page, int *index_out) return(NULL); } +unsigned long page_to_pfn(struct page *page) +{ + struct mem_region *region = page_region(page, NULL); + + return(region->start_pfn + (page - (struct page *) region->mem_map)); +} + +struct mem_region *pfn_to_region(unsigned long pfn, int *index_out) +{ + struct mem_region *region; + int i; + + for(i = 0; i < NREGIONS; i++){ + region = regions[i]; + if(region == NULL) + continue; + + if((region->start_pfn <= pfn) && + (region->start_pfn + (region->len >> PAGE_SHIFT) > pfn)){ + if(index_out != NULL) + *index_out = i; + return(region); + } + } + return(NULL); +} + +struct page *pfn_to_page(unsigned long pfn) +{ + struct mem_region *region = pfn_to_region(pfn, NULL); + struct page *mem_map = (struct page *) region->mem_map; + + return(&mem_map[pfn - region->start_pfn]); +} + +unsigned long phys_to_pfn(unsigned long p) +{ + struct mem_region *region = regions[phys_region_index(p)]; + + return(region->start_pfn + (phys_addr(p) >> PAGE_SHIFT)); +} + +unsigned long pfn_to_phys(unsigned long pfn) +{ + int n; + struct mem_region *region = pfn_to_region(pfn, &n); + + return(mk_phys((pfn - region->start_pfn) << PAGE_SHIFT, n)); +} + struct page *page_mem_map(struct page *page) { return((struct page *) page_region(page, NULL)->mem_map); @@ -535,7 +745,7 @@ struct page *phys_to_page(unsigned long phys) return(mem_map + (phys_offset(phys) >> PAGE_SHIFT)); } -int setup_mem_maps(void) +static int setup_mem_maps(void) { struct mem_region *region; int i; @@ -594,7 +804,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) struct page *pte; do { - pte = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0); + pte = alloc_pages(GFP_KERNEL, 0); if (pte) clear_highpage(pte); else { diff --git a/arch/um/kernel/mem_user.c b/arch/um/kernel/mem_user.c index 38069704cf53398dcf6350ddd85f17d67ea9510a..8e036687f717423cfd1dfda5ed962bd888715482 100644 --- a/arch/um/kernel/mem_user.c +++ b/arch/um/kernel/mem_user.c @@ -77,25 +77,6 @@ int create_mem_file(unsigned long len) return(fd); } -void setup_one_range(int n, int fd, char *driver, unsigned long start, - unsigned long len, struct mem_region *region) -{ - if(fd == -1) - fd = create_mem_file(len); - if(region == NULL){ - region = malloc(sizeof(*region)); - if(region == NULL){ - perror("Allocating mem_region"); - exit(1); - } - } - *region = ((struct mem_region) { driver : driver, - start : start, - len : len, - fd : fd } ); - regions[n] = region; -} - int setup_region(struct mem_region *region, void *entry) { void *loc, *start; diff --git 
a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c index afbeccd077b15c4f5a8707cd6eb9fe0e154b15df..5eecc8260596e03ce8c0b9158f3027b95b8d06fe 100644 --- a/arch/um/kernel/process_kern.c +++ b/arch/um/kernel/process_kern.c @@ -528,7 +528,7 @@ unsigned long um_virt_to_phys(void *t, unsigned long addr) char *current_cmd(void) { -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM) return("(Unknown)"); #else unsigned long addr = um_virt_to_phys(current, current->mm->arg_start); diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index 2edf3ba881c5422e2a293552a57997e1b31972d8..2a094df526418bb7def63722655f7fbf4c0a3781 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c @@ -178,6 +178,11 @@ void flush_tlb_kernel_vm(void) flush_tlb_kernel_range(start_vm, end_vm); } +void __flush_tlb_one(unsigned long addr) +{ + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); +} + void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 6546de92fa449178e766f8f66b7769ef50afc0b6..5452d3f2563cfe23a71a2caa89712ad98f1b338d 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -4,6 +4,7 @@ */ #include "linux/config.h" +#include "linux/kernel.h" #include "linux/sched.h" #include "linux/notifier.h" #include "linux/mm.h" @@ -109,8 +110,6 @@ static int start_kernel_proc(void *unused) return(0); } -extern unsigned long high_physmem; - #ifdef CONFIG_HOST_2G_2G #define TOP 0x80000000 #else @@ -160,7 +159,7 @@ void set_cmdline(char *cmd) snprintf(ptr, (argv1_end - ptr) * sizeof(*ptr), " [%s]", cmd); memset(argv1_begin + strlen(argv1_begin), '\0', - argv1_end - argv1_begin - strlen(argv1_begin)); + argv1_end - argv1_begin - strlen(argv1_begin)); } static char *usage_string = @@ -263,10 +262,12 @@ unsigned long brk_start; static struct vm_reserved kernel_vm_reserved; +#define MIN_VMALLOC (32 * 1024 * 1024) + int linux_main(int argc, char **argv) { unsigned long avail; - unsigned long virtmem_size; + unsigned long virtmem_size, max_physmem; unsigned int i, add, err; void *sp; @@ -278,7 +279,7 @@ int linux_main(int argc, char **argv) } if(have_root == 0) add_arg(saved_command_line, DEFAULT_COMMAND_LINE); - if(!jail) + if(!jail || debug) remap_data(ROUND_DOWN(&_stext), ROUND_UP(&_etext), 1); remap_data(ROUND_DOWN(&_sdata), ROUND_UP(&_edata), 1); brk_start = (unsigned long) sbrk(0); @@ -295,20 +296,20 @@ int linux_main(int argc, char **argv) argv1_end = &argv[1][strlen(argv[1])]; set_usable_vm(uml_physmem, get_kmem_end()); + + highmem = 0; + max_physmem = get_kmem_end() - uml_physmem - MIN_VMALLOC; + if(physmem_size > max_physmem){ + highmem = physmem_size - max_physmem; + physmem_size -= highmem; + } + high_physmem = uml_physmem + physmem_size; high_memory = (void *) high_physmem; - setup_physmem(uml_physmem, uml_reserved, physmem_size); - - /* Kernel vm starts after physical memory and is either the size - * of physical memory or the remaining space left in the kernel - * area of the address space, whichever is smaller. 
-	 */
 	start_vm = VMALLOC_START;
-	if(start_vm >= get_kmem_end())
-		panic("Physical memory too large to allow any kernel "
-		      "virtual memory");
+
+	setup_physmem(uml_physmem, uml_reserved, physmem_size);
 	virtmem_size = physmem_size;
 	avail = get_kmem_end() - start_vm;
 	if(physmem_size > avail) virtmem_size = avail;
diff --git a/include/asm-um/fixmap.h b/include/asm-um/fixmap.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e8a4c1ace9c48d2edb7f02f5fbfd32010069eb4
--- /dev/null
+++ b/include/asm-um/fixmap.h
@@ -0,0 +1,96 @@
+#ifndef __UM_FIXMAP_H
+#define __UM_FIXMAP_H
+
+#include <linux/config.h>
+#include <asm/kmap_types.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+
+/*
+ * on UP currently we will have no trace of the fixmap mechanism,
+ * no page table allocations, etc. This might change in the
+ * future, say framebuffers for the console driver(s) could be
+ * fix-mapped?
+ */
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
+	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+	__end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+			  unsigned long phys, pgprot_t flags);
+
+#define set_fixmap(idx, phys) \
+		__set_fixmap(idx, phys, PAGE_KERNEL)
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+/*
+ * used by vmalloc.c.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap, and leave one page empty
+ * at the top of mem..
+ */
+extern unsigned long get_kmem_end(void);
+
+#define FIXADDR_TOP	(get_kmem_end() - 0x2000)
+#define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
+
+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+extern void __this_fixmap_does_not_exist(void);
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static inline unsigned long fix_to_virt(const unsigned int idx)
+{
+	/*
+	 * this branch gets completely eliminated after inlining,
+	 * except when someone tries to use fixaddr indices in an
+	 * illegal way. (such as mixing up address types or using
+	 * out-of-range indices).
+	 *
+	 * If it doesn't get removed, the linker will complain
+	 * loudly with a reasonably clear error message..
+ */ + if (idx >= __end_of_fixed_addresses) + __this_fixmap_does_not_exist(); + + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); +} + +#endif diff --git a/include/asm-um/highmem.h b/include/asm-um/highmem.h index 6713fb2a4896e11a90be2cd934e7f12f8cacdc44..36974cb8abc7925124025c2b41d94e35783ffbb2 100644 --- a/include/asm-um/highmem.h +++ b/include/asm-um/highmem.h @@ -1,6 +1,12 @@ #ifndef __UM_HIGHMEM_H #define __UM_HIGHMEM_H +#include "asm/page.h" +#include "asm/fixmap.h" #include "asm/arch/highmem.h" +#undef PKMAP_BASE + +#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK) + #endif diff --git a/include/asm-um/page.h b/include/asm-um/page.h index 0f7e84c3196a3e3f28a1d7f11709f2cc45c71fee..e3279d1d0b315d543f77a1c4dfcd182def7d8335 100644 --- a/include/asm-um/page.h +++ b/include/asm-um/page.h @@ -43,10 +43,11 @@ extern void *region_va(unsigned long phys); #define __pa(virt) region_pa((void *) (virt)) #define __va(phys) region_va((unsigned long) (phys)) +extern unsigned long page_to_pfn(struct page *page); +extern struct page *pfn_to_page(unsigned long pfn); + extern struct page *phys_to_page(unsigned long phys); -#define pfn_to_page(pfn) (phys_to_page(pfn << PAGE_SHIFT)) -#define page_to_pfn(page) (page_to_phys(page) >> PAGE_SHIFT) #define virt_to_page(v) (phys_to_page(__pa(v))) extern struct page *page_mem_map(struct page *page); diff --git a/include/asm-um/pgalloc.h b/include/asm-um/pgalloc.h index 4620297e2cc748648f8f910af021de06df70bdb1..73973aeaf48289c91af03b1ca7729ae094202b13 100644 --- a/include/asm-um/pgalloc.h +++ b/include/asm-um/pgalloc.h @@ -8,6 +8,7 @@ #define __UM_PGALLOC_H #include "linux/mm.h" +#include "asm/fixmap.h" #define pmd_populate_kernel(mm, pmd, pte) \ set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte))) @@ -15,7 +16,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) { - set_pmd(pmd, __pmd(_PAGE_TABLE + phys_addr(page_to_phys(pte)))); + set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte))); } extern pgd_t *pgd_alloc(struct mm_struct *); diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h index d74468293ea801e3f1dc9a6a2ebab54da345a3ee..89f4343a40778f4db736f9a544cfd5b3d5221943 100644 --- a/include/asm-um/pgtable.h +++ b/include/asm-um/pgtable.h @@ -10,6 +10,7 @@ #include "linux/sched.h" #include "asm/processor.h" #include "asm/page.h" +#include "asm/fixmap.h" extern pgd_t swapper_pg_dir[1024]; @@ -62,12 +63,16 @@ extern unsigned long *empty_zero_page; */ extern unsigned long high_physmem; -extern unsigned long end_vm; #define VMALLOC_OFFSET (__va_space) #define VMALLOC_START (((unsigned long) high_physmem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #define VMALLOC_VMADDR(x) ((unsigned long)(x)) -#define VMALLOC_END (end_vm) + +#if CONFIG_HIGHMEM +# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) +#else +# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) +#endif #define _PAGE_PRESENT 0x001 #define _PAGE_NEWPAGE 0x002 @@ -183,15 +188,17 @@ static inline void pgd_clear(pgd_t * pgdp) { } extern struct page *pte_mem_map(pte_t pte); extern struct page *phys_mem_map(unsigned long phys); +extern unsigned long phys_to_pfn(unsigned long p); +extern unsigned long pfn_to_phys(unsigned long pfn); #define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_address(x) (__va(pte_val(x) & PAGE_MASK)) #define mk_phys(a, r) ((a) + (r << REGION_SHIFT)) #define phys_addr(p) ((p) & 
~REGION_MASK) #define phys_page(p) (phys_mem_map(p) + ((phys_addr(p)) >> PAGE_SHIFT)) -#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) -#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) -#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) +#define pte_pfn(x) phys_to_pfn(pte_val(x)) +#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot)) +#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot)) static inline pte_t pte_mknewprot(pte_t pte) { @@ -333,6 +340,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) /* to find an entry in a page-table-directory. */ #define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) +#define __pgd_offset(address) pgd_index(address) /* to find an entry in a page-table-directory */ #define pgd_offset(mm, address) \ @@ -341,6 +349,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) /* to find an entry in a kernel page-table-directory */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) +#define __pmd_offset(address) \ + (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) + /* Find an entry in the second-level page table.. */ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) { diff --git a/include/asm-um/tlbflush.h b/include/asm-um/tlbflush.h index fd55c6efb4d319f7edc3bc69879d54a9ab684bab..522aa30f7eaa62d12d6dcec037053eee350846a0 100644 --- a/include/asm-um/tlbflush.h +++ b/include/asm-um/tlbflush.h @@ -27,6 +27,7 @@ extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); extern void flush_tlb_kernel_vm(void); extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void __flush_tlb_one(unsigned long addr); static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
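
A note on the new pfn helpers in arch/um/kernel/mem.c: once a highmem region exists, UML's page frames no longer sit in one linear mem_map, so page_to_pfn(), pfn_to_page(), phys_to_pfn() and pfn_to_phys() resolve a pfn by scanning regions[] for the mem_region whose [start_pfn, start_pfn + len/PAGE_SIZE) range covers it and then indexing that region's private mem_map. The stand-alone, user-space sketch below (not part of the patch) models only that lookup; the field names and containment test mirror pfn_to_region()/pfn_to_page() above, but NREGIONS, the two-region setup in main(), the array sizes, and the explicit region argument to page_to_pfn() are illustrative assumptions (the real page_to_pfn() derives the region from the page pointer via page_region()).

/*
 * Stand-alone sketch (user space, not part of the patch): models the
 * region-based pfn lookup used by the new helpers in mem.c.  Each
 * mem_region covers the frames [start_pfn, start_pfn + len/PAGE_SIZE)
 * and carries its own page array, so pfn -> page means "find the
 * covering region, then index its mem_map".  Sizes and the two regions
 * in main() are made up for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define NREGIONS 8			/* illustrative, not the kernel's value */

struct page { int dummy; };

struct mem_region {
	unsigned long start_pfn;	/* first pfn covered by this region */
	unsigned long len;		/* region length in bytes */
	struct page *mem_map;		/* per-region page array */
};

static struct mem_region *regions[NREGIONS];

/* Same containment test as pfn_to_region() in the patch. */
static struct mem_region *pfn_to_region(unsigned long pfn)
{
	int i;

	for(i = 0; i < NREGIONS; i++){
		struct mem_region *region = regions[i];

		if(region == NULL)
			continue;
		if((region->start_pfn <= pfn) &&
		   (region->start_pfn + (region->len >> PAGE_SHIFT) > pfn))
			return(region);
	}
	return(NULL);
}

/* Assumes some region covers the pfn, as the kernel version does. */
static struct page *pfn_to_page(unsigned long pfn)
{
	struct mem_region *region = pfn_to_region(pfn);

	return(&region->mem_map[pfn - region->start_pfn]);
}

/* The kernel version finds the region from the page via page_region();
 * here it is passed in explicitly to keep the sketch short. */
static unsigned long page_to_pfn(struct mem_region *region, struct page *page)
{
	return(region->start_pfn + (page - region->mem_map));
}

int main(void)
{
	static struct page physmem_map[16], highmem_map[16];
	static struct mem_region physmem = { 0, 16 << PAGE_SHIFT, physmem_map };
	static struct mem_region high = { 16, 16 << PAGE_SHIFT, highmem_map };

	regions[0] = &physmem;
	regions[1] = &high;

	/* pfn 20 is past the 16 physmem frames, so it lands in 'high' */
	printf("pfn 20 -> page index %ld in the highmem region\n",
	       (long) (pfn_to_page(20) - highmem_map));
	printf("round trip gives pfn %lu\n",
	       page_to_pfn(&high, pfn_to_page(20)));
	return(0);
}

Separately, the highmem split in linux_main() works out as max_physmem = get_kmem_end() - uml_physmem - MIN_VMALLOC, with anything requested beyond that carved off as highmem. As a worked example (numbers assumed, not from the patch): if the kernel area leaves 480MB above uml_physmem and MIN_VMALLOC reserves 32MB, max_physmem is 448MB, so a request for 512MB of physical memory boots with physmem_size trimmed to 448MB and the remaining 64MB handed to setup_highmem() later.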