Commit b683effb authored by David Howells, committed by Linus Torvalds

[PATCH] Further nommu changes

The attached patch further revises the previously changed nommu code. The
new changes do the following:

 (0) Some additional variables have been defined to make the nommu code
     compile at all.

 (1) Get rid of the alternate vm_area_struct. The nommu mmap now uses the
     normal one, to which a refcount field has been added, contingent on
     !CONFIG_MMU.

 (2) vm_rb is now used to keep track of the VMAs in an rbtree rather than
     on a separate list.

 (3) mm_tblock_struct is now vm_list_struct.

 (4) On nommu, put_vma() now calls vma->vm_ops->close() if one is provided.

 (5) A dummy generic_file_vm_ops has been provided. It does nothing, but
     permits tiny-shmem to compile.

     tiny-shmem and ramfs still need attention so that files contained
     therein can, to some extent, be mmapped shared-writable on nommu.
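To make the sharing model concrete, here is a minimal user-space sketch of
the scheme described above: one global registry of shareable VMAs (a sorted
chase through a singly-linked list standing in for the kernel's
nommu_vma_tree rbtree, for brevity), per-mm subscription through a
vm_list_struct-style chain node, and vm_usage as the share count. Apart from
the names vm_usage, vm_list_struct and nommu_vma_tree, everything here is
illustrative, not kernel API.

/* Minimal user-space sketch of the nommu VMA-sharing scheme above.
 * A global registry of shareable regions stands in for nommu_vma_tree
 * (a linked list here rather than an rbtree, for brevity); each "mm"
 * subscribes through a vm_list_struct-like chain node, and vm_usage
 * is the share count.  All other names are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

struct vma {
	unsigned long vm_start, vm_end;
	int vm_usage;			/* refcount (VMAs shared if !MMU) */
	struct vma *next;		/* stand-in for vm_rb linkage */
};

struct vm_list {			/* per-mm chain, like vm_list_struct */
	struct vm_list *next;
	struct vma *vma;
};

static struct vma *registry;		/* stand-in for nommu_vma_tree */

/* share an existing region starting at 'start', or register a new one */
static struct vma *get_vma(unsigned long start, unsigned long len)
{
	struct vma *v;

	for (v = registry; v; v = v->next)
		if (v->vm_start == start) {
			v->vm_usage++;	/* like atomic_inc(&vma->vm_usage) */
			return v;
		}

	v = calloc(1, sizeof(*v));
	v->vm_start = start;
	v->vm_end = start + len;
	v->vm_usage = 1;
	v->next = registry;		/* like add_nommu_vma() */
	registry = v;
	return v;
}

/* drop a reference; unregister and free the region on the last put */
static void put_vma(struct vma *v)
{
	struct vma **p;

	if (--v->vm_usage > 0)
		return;

	for (p = &registry; *p; p = &(*p)->next)
		if (*p == v) {
			*p = v->next;	/* like rb_erase() */
			break;
		}
	free(v);
}

int main(void)
{
	struct vma *a = get_vma(0x8000, 4096);	/* first mapping */
	struct vma *b = get_vma(0x8000, 4096);	/* second mm shares it */

	printf("shared=%d usage=%d\n", a == b, a->vm_usage);
	put_vma(b);				/* usage drops to 1 */
	put_vma(a);				/* region unregistered and freed */
	return 0;
}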
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 17c9f1a2
@@ -58,7 +58,6 @@ extern int sysctl_legacy_va_layout;
  * space that has a special rule for the page-fault handlers (ie a shared
  * library, the executable area etc).
  */
-#ifdef CONFIG_MMU
 struct vm_area_struct {
 	struct mm_struct * vm_mm;	/* The address space we belong to. */
 	unsigned long vm_start;		/* Our start address within vm_mm. */
@@ -107,34 +106,29 @@ struct vm_area_struct {
 	struct file * vm_file;		/* File we map to (can be NULL). */
 	void * vm_private_data;		/* was vm_pte (shared mem) */
+#ifndef CONFIG_MMU
+	atomic_t vm_usage;		/* refcount (VMAs shared if !MMU) */
+#endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
 #endif
 };
-#else
-
-struct vm_area_struct {
-	struct list_head vm_link;	/* system object list */
-	atomic_t vm_usage;		/* count of refs */
-	unsigned long vm_start;
-	unsigned long vm_end;
-	pgprot_t vm_page_prot;		/* access permissions of this VMA */
-	unsigned long vm_flags;
-	unsigned long vm_pgoff;
-	struct file *vm_file;		/* file or device mapped */
-};
-
-struct mm_tblock_struct {
-	struct mm_tblock_struct *next;
+/*
+ * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
+ * disabled, then there's a single shared list of VMAs maintained by the
+ * system, and mm's subscribe to these individually
+ */
+struct vm_list_struct {
+	struct vm_list_struct	*next;
 	struct vm_area_struct	*vma;
 };
 
-extern struct list_head nommu_vma_list;
+#ifndef CONFIG_MMU
+extern struct rb_root nommu_vma_tree;
 extern struct rw_semaphore nommu_vma_sem;
 extern unsigned int kobjsize(const void *objp);
 #endif
 
 /*
...
@@ -36,14 +36,18 @@ atomic_t vm_committed_space = ATOMIC_INIT(0);
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50; /* default is 50% */
 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+int heap_stack_gap = 0;
 
 EXPORT_SYMBOL(sysctl_max_map_count);
 EXPORT_SYMBOL(mem_map);
 
 /* list of shareable VMAs */
-LIST_HEAD(nommu_vma_list);
+struct rb_root nommu_vma_tree = RB_ROOT;
 DECLARE_RWSEM(nommu_vma_sem);
 
+struct vm_operations_struct generic_file_vm_ops = {
+};
+
 void __init prio_tree_init(void)
 {
 }
@@ -273,19 +277,63 @@ static inline unsigned long calc_vm_flags(unsigned long prot, unsigned long flags)
 #ifdef DEBUG
 static void show_process_blocks(void)
 {
-	struct mm_tblock_struct *tblock;
+	struct vm_list_struct *vml;
 
 	printk("Process blocks %d:", current->pid);
 
-	for (tblock = &current->mm->context.tblock; tblock; tblock = tblock->next) {
-		printk(" %p: %p", tblock, tblock->rblock);
-		if (tblock->rblock)
-			printk(" (%d @%p #%d)", kobjsize(tblock->rblock->kblock), tblock->rblock->kblock, tblock->rblock->refcount);
-		printk(tblock->next ? " ->" : ".\n");
+	for (vml = &current->mm->context.vmlist; vml; vml = vml->next) {
+		printk(" %p: %p", vml, vml->vma);
+		if (vml->vma)
+			printk(" (%d @%lx #%d)",
+			       kobjsize((void *) vml->vma->vm_start),
+			       vml->vma->vm_start,
+			       atomic_read(&vml->vma->vm_usage));
+		printk(vml->next ? " ->" : ".\n");
 	}
 }
 #endif /* DEBUG */
 
+static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
+{
+	struct vm_area_struct *vma;
+	struct rb_node *n = nommu_vma_tree.rb_node;
+
+	while (n) {
+		vma = rb_entry(n, struct vm_area_struct, vm_rb);
+
+		if (start < vma->vm_start)
+			n = n->rb_left;
+		else if (start > vma->vm_start)
+			n = n->rb_right;
+		else
+			return vma;
+	}
+
+	return NULL;
+}
+
+static void add_nommu_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *pvma;
+	struct rb_node **p = &nommu_vma_tree.rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*p) {
+		parent = *p;
+		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
+
+		if (vma->vm_start < pvma->vm_start)
+			p = &(*p)->rb_left;
+		else if (vma->vm_start > pvma->vm_start)
+			p = &(*p)->rb_right;
+		else
+			BUG();	/* shouldn't happen by this point */
+	}
+
+	rb_link_node(&vma->vm_rb, parent, p);
+	rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
+}
+
 unsigned long do_mmap_pgoff(struct file *file,
 			    unsigned long addr,
 			    unsigned long len,
@@ -293,9 +341,9 @@ unsigned long do_mmap_pgoff(struct file *file,
 			    unsigned long flags,
 			    unsigned long pgoff)
 {
-	struct mm_tblock_struct *tblock = NULL;
-	struct vm_area_struct *vma = NULL, *pvma;
-	struct list_head *p;
+	struct vm_list_struct *vml = NULL;
+	struct vm_area_struct *vma = NULL;
+	struct rb_node *rb;
 	unsigned int vm_flags;
 	void *result;
 	int ret, chrdev;
@@ -334,10 +382,10 @@ unsigned long do_mmap_pgoff(struct file *file,
 		return -EINVAL;
 
 	/* we're going to need to record the mapping if it works */
-	tblock = kmalloc(sizeof(struct mm_tblock_struct), GFP_KERNEL);
-	if (!tblock)
-		goto error_getting_tblock;
-	memset(tblock, 0, sizeof(*tblock));
+	vml = kmalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
+	if (!vml)
+		goto error_getting_vml;
+	memset(vml, 0, sizeof(*vml));
 
 	/* Do simple checking here so the lower-level routines won't have
 	 * to. we assume access permissions have been handled by the open
@@ -376,7 +424,9 @@ unsigned long do_mmap_pgoff(struct file *file,
 		unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		unsigned long vmpglen;
 
-		list_for_each_entry(vma, &nommu_vma_list, vm_link) {
+		for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
+			vma = rb_entry(rb, struct vm_area_struct, vm_rb);
+
 			if (!(vma->vm_flags & VM_SHARED))
 				continue;
 
@@ -399,7 +449,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 			/* we've found a VMA we can share */
 			atomic_inc(&vma->vm_usage);
 
-			tblock->vma = vma;
+			vml->vma = vma;
 			result = (void *) vma->vm_start;
 			goto shared;
 		}
@@ -422,7 +472,8 @@ unsigned long do_mmap_pgoff(struct file *file,
 	if (!vma)
 		goto error_getting_vma;
 
-	INIT_LIST_HEAD(&vma->vm_link);
+	memset(vma, 0, sizeof(*vma));
+	INIT_LIST_HEAD(&vma->anon_vma_node);
 	atomic_set(&vma->vm_usage, 1);
 	if (file)
 		get_file(file);
@@ -432,7 +483,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 	vma->vm_end = addr + len;
 	vma->vm_pgoff = pgoff;
 
-	tblock->vma = vma;
+	vml->vma = vma;
 
 	/*
 	 * determine the object being mapped and call the appropriate
@@ -533,19 +584,13 @@ unsigned long do_mmap_pgoff(struct file *file,
 
 	current->mm->total_vm += len >> PAGE_SHIFT;
 
-	list_for_each(p, &nommu_vma_list) {
-		pvma = list_entry(p, struct vm_area_struct, vm_link);
-		if (pvma->vm_start > vma->vm_start)
-			break;
-	}
-	list_add_tail(&vma->vm_link, p);
+	add_nommu_vma(vma);
 
  shared:
-	realalloc += kobjsize(tblock);
-	askedalloc += sizeof(*tblock);
+	realalloc += kobjsize(vml);
+	askedalloc += sizeof(*vml);
 
-	tblock->next = current->mm->context.tblock;
-	current->mm->context.tblock = tblock;
+	vml->next = current->mm->context.vmlist;
+	current->mm->context.vmlist = vml;
 
 	up_write(&nommu_vma_sem);
@@ -560,7 +605,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 	kfree(result);
  error:
 	up_write(&nommu_vma_sem);
-	kfree(tblock);
+	kfree(vml);
 	if (vma) {
 		fput(vma->vm_file);
 		kfree(vma);
@@ -570,19 +615,19 @@ unsigned long do_mmap_pgoff(struct file *file,
  sharing_violation:
 	up_write(&nommu_vma_sem);
 	printk("Attempt to share mismatched mappings\n");
-	kfree(tblock);
+	kfree(vml);
 	return -EINVAL;
 
 error_getting_vma:
 	up_write(&nommu_vma_sem);
-	kfree(tblock);
-	printk("Allocation of tblock for %lu byte allocation from process %d failed\n",
+	kfree(vml);
+	printk("Allocation of vml for %lu byte allocation from process %d failed\n",
 	       len, current->pid);
 	show_free_areas();
 	return -ENOMEM;
 
-error_getting_tblock:
-	printk("Allocation of tblock for %lu byte allocation from process %d failed\n",
+error_getting_vml:
+	printk("Allocation of vml for %lu byte allocation from process %d failed\n",
 	       len, current->pid);
 	show_free_areas();
 	return -ENOMEM;
@@ -592,8 +637,12 @@ static void put_vma(struct vm_area_struct *vma)
 {
 	if (vma) {
 		down_write(&nommu_vma_sem);
 
 		if (atomic_dec_and_test(&vma->vm_usage)) {
-			list_del_init(&vma->vm_link);
+			rb_erase(&vma->vm_rb, &nommu_vma_tree);
+
+			if (vma->vm_ops && vma->vm_ops->close)
+				vma->vm_ops->close(vma);
 
 			if (!(vma->vm_flags & VM_IO) && vma->vm_start) {
 				realalloc -= kobjsize((void *) vma->vm_start);
@@ -607,13 +656,14 @@ static void put_vma(struct vm_area_struct *vma)
 			askedalloc -= sizeof(*vma);
 			kfree(vma);
 		}
+
 		up_write(&nommu_vma_sem);
 	}
 }
 
 int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 {
-	struct mm_tblock_struct *tblock, **parent;
+	struct vm_list_struct *vml, **parent;
 
 #ifdef MAGIC_ROM_PTR
 	/* For efficiency's sake, if the pointer is obviously in ROM,
@@ -626,23 +676,23 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 	printk("do_munmap:\n");
 #endif
 
-	for (parent = &mm->context.tblock; *parent; parent = &(*parent)->next)
+	for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next)
 		if ((*parent)->vma->vm_start == addr)
 			break;
-	tblock = *parent;
+	vml = *parent;
 
-	if (!tblock) {
+	if (!vml) {
 		printk("munmap of non-mmaped memory by process %d (%s): %p\n",
 		       current->pid, current->comm, (void *) addr);
 		return -EINVAL;
 	}
 
-	put_vma(tblock->vma);
+	put_vma(vml->vma);
 
-	*parent = tblock->next;
-	realalloc -= kobjsize(tblock);
-	askedalloc -= sizeof(*tblock);
-	kfree(tblock);
+	*parent = vml->next;
+	realalloc -= kobjsize(vml);
+	askedalloc -= sizeof(*vml);
+	kfree(vml);
 
 	mm->total_vm -= len >> PAGE_SHIFT;
 
 #ifdef DEBUG
@@ -655,7 +705,7 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 /* Release all mmaps. */
 void exit_mmap(struct mm_struct * mm)
 {
-	struct mm_tblock_struct *tmp;
+	struct vm_list_struct *tmp;
 
 	if (mm) {
 #ifdef DEBUG
@@ -664,8 +714,8 @@ void exit_mmap(struct mm_struct * mm)
 
 		mm->total_vm = 0;
 
-		while ((tmp = mm->context.tblock)) {
-			mm->context.tblock = tmp->next;
+		while ((tmp = mm->context.vmlist)) {
+			mm->context.vmlist = tmp->next;
 			put_vma(tmp->vma);
 
 			realalloc -= kobjsize(tmp);
@@ -709,7 +759,7 @@ unsigned long do_mremap(unsigned long addr,
 			unsigned long old_len, unsigned long new_len,
 			unsigned long flags, unsigned long new_addr)
 {
-	struct mm_tblock_struct *tblock = NULL;
+	struct vm_list_struct *vml = NULL;
 
 	/* insanity checks first */
 	if (new_len == 0)
@@ -718,29 +768,29 @@ unsigned long do_mremap(unsigned long addr,
 	if (flags & MREMAP_FIXED && new_addr != addr)
 		return (unsigned long) -EINVAL;
 
-	for (tblock = current->mm->context.tblock; tblock; tblock = tblock->next)
-		if (tblock->vma->vm_start == addr)
+	for (vml = current->mm->context.vmlist; vml; vml = vml->next)
+		if (vml->vma->vm_start == addr)
 			goto found;
 
 	return (unsigned long) -EINVAL;
 
 found:
-	if (tblock->vma->vm_end != tblock->vma->vm_start + old_len)
+	if (vml->vma->vm_end != vml->vma->vm_start + old_len)
 		return (unsigned long) -EFAULT;
 
-	if (tblock->vma->vm_flags & VM_MAYSHARE)
+	if (vml->vma->vm_flags & VM_MAYSHARE)
 		return (unsigned long) -EPERM;
 
 	if (new_len > kobjsize((void *) addr))
 		return (unsigned long) -ENOMEM;
 
 	/* all checks complete - do it */
-	tblock->vma->vm_end = tblock->vma->vm_start + new_len;
+	vml->vma->vm_end = vml->vma->vm_start + new_len;
 
 	askedalloc -= old_len;
 	askedalloc += new_len;
 
-	return tblock->vma->vm_start;
+	return vml->vma->vm_start;
 }
 
 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
@@ -778,3 +828,20 @@ void arch_unmap_area(struct vm_area_struct *area)
 {
 }
 
+void update_mem_hiwater(void)
+{
+	struct task_struct *tsk = current;
+
+	if (likely(tsk->mm)) {
+		if (tsk->mm->hiwater_rss < tsk->mm->rss)
+			tsk->mm->hiwater_rss = tsk->mm->rss;
+		if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
+			tsk->mm->hiwater_vm = tsk->mm->total_vm;
+	}
+}
+
+void unmap_mapping_range(struct address_space *mapping,
+			 loff_t const holebegin, loff_t const holelen,
+			 int even_cows)
+{
+}
@@ -112,9 +112,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	vma->vm_file = file;
-#ifdef CONFIG_MMU
 	vma->vm_ops = &generic_file_vm_ops;
-#endif
 	return 0;
 }
...