/*  arch/x86_64/mm/modutil.c
 *
 *  Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *  Based upon code written by Linus Torvalds and others.
 * 
 *  Blatantly copied from sparc64 for x86-64 by Andi Kleen. 
 *  Should use direct mapping with 2MB pages. This would need extension
 *  of the kernel mapping.
 */
 
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>

/* Address-sorted singly-linked list of currently mapped module areas;
 * module_map() inserts in order, module_unmap() unlinks. */
static struct vm_struct * modvmlist = NULL;

void module_unmap (void * addr)
{
	struct vm_struct **p, *tmp;
24
	int i;
25 26 27 28 29 30 31 32 33 34

	if (!addr)
		return;
	if ((PAGE_SIZE-1) & (unsigned long) addr) {
		printk("Trying to unmap module with bad address (%p)\n", addr);
		return;
	}
	for (p = &modvmlist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
35
			goto found;
36 37 38
		}
	}
	printk("Trying to unmap nonexistent module vm area (%p)\n", addr);
39 40 41 42 43 44 45 46 47 48 49
	return;
 found:
	unmap_vm_area(tmp);
	for (i = 0; i < tmp->nr_pages; i++) {
		if (unlikely(!tmp->pages[i]))
			BUG();
		__free_page(tmp->pages[i]);
	}
	
	kfree(tmp->pages);
	kfree(tmp);					
50 51 52 53 54
}

void * module_map (unsigned long size)
{
	struct vm_struct **p, *tmp, *area;
55 56 57
	struct page **pages;
	void * addr;
	unsigned int nr_pages, array_size, i;
58 59

	size = PAGE_ALIGN(size);
60 61
	if (!size || size > MODULES_LEN)
		return NULL;
62 63 64 65 66 67 68
		
	addr = (void *) MODULES_VADDR;
	for (p = &modvmlist; (tmp = *p) ; p = &tmp->next) {
		if (size + (unsigned long) addr < (unsigned long) tmp->addr)
			break;
		addr = (void *) (tmp->size + (unsigned long) tmp->addr);
	}
69 70
	if ((unsigned long) addr + size >= MODULES_END)
		return NULL;
71 72
	
	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
73 74
	if (!area)
		return NULL;
75 76 77
	area->size = size + PAGE_SIZE;
	area->addr = addr;
	area->next = *p;
78 79 80
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
81 82
	*p = area;

83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101
	nr_pages = size >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	area->pages = pages = kmalloc(array_size, GFP_KERNEL);
	if (!area->pages)
		goto fail;

	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(GFP_KERNEL);
		if (unlikely(!area->pages[i]))
			goto fail;
	}
	
	if (map_vm_area(area, PAGE_KERNEL, &pages)) {
		unmap_vm_area(area);
		goto fail;
102
	}
103 104 105 106 107 108 109 110 111 112 113 114 115 116

	return area->addr;

fail:
	if (area->pages) {
		for (i = 0; i < area->nr_pages; i++) {
			if (area->pages[i])
				__free_page(area->pages[i]);
		}
		kfree(area->pages);
	}
	kfree(area);

	return NULL;
117
}