Commit afa684f6 authored by Cliff Wickman, committed by Linus Torvalds

fix "mspec: handle shrinking virtual memory areas"

The vma_data structure may be shared by vma's from multiple tasks, with no
way of knowing which areas are shared or not shared, so release/clear pages
only when the refcount (of vma's) goes to zero.
Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: Jes Sorensen <jes@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f9b7cba1
@@ -155,23 +155,22 @@ mspec_open(struct vm_area_struct *vma)
  * mspec_close
  *
  * Called when unmapping a device mapping. Frees all mspec pages
- * belonging to the vma.
+ * belonging to all the vma's sharing this vma_data structure.
  */
 static void
 mspec_close(struct vm_area_struct *vma)
 {
         struct vma_data *vdata;
-        int index, last_index, result;
+        int index, last_index;
         unsigned long my_page;
 
         vdata = vma->vm_private_data;
 
-        BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
+        if (!atomic_dec_and_test(&vdata->refcnt))
+                return;
 
-        spin_lock(&vdata->lock);
-        index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
-        last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
-        for (; index < last_index; index++) {
+        last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+        for (index = 0; index < last_index; index++) {
                 if (vdata->maddr[index] == 0)
                         continue;
                 /*
@@ -180,20 +179,12 @@ mspec_close(struct vm_area_struct *vma)
                  */
                 my_page = vdata->maddr[index];
                 vdata->maddr[index] = 0;
-                spin_unlock(&vdata->lock);
-                result = mspec_zero_block(my_page, PAGE_SIZE);
-                if (!result)
+                if (!mspec_zero_block(my_page, PAGE_SIZE))
                         uncached_free_page(my_page);
                 else
                         printk(KERN_WARNING "mspec_close(): "
-                               "failed to zero page %i\n",
-                               result);
-                spin_lock(&vdata->lock);
+                               "failed to zero page %ld\n", my_page);
         }
-        spin_unlock(&vdata->lock);
-
-        if (!atomic_dec_and_test(&vdata->refcnt))
-                return;
 
         if (vdata->flags & VMD_VMALLOCED)
                 vfree(vdata);
@@ -201,7 +192,6 @@ mspec_close(struct vm_area_struct *vma)
                 kfree(vdata);
 }
 
-
 /*
  * mspec_nopfn
  *
...
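
The rule the commit message describes, namely release and clear pages only when the last vma sharing a vma_data structure is closed, is the standard last-reference-frees idiom. Below is a rough user-space sketch of that idiom only; shared_data, shared_open and shared_close are made-up names for illustration, not the driver's identifiers.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative analogue of vma_data: one bookkeeping structure shared by
 * several mappings, torn down only by whoever drops the last reference. */
struct shared_data {
        atomic_int refcnt;
        /* per-page bookkeeping (the driver's maddr[] array) would live here */
};

/* Hypothetical "open": every new mapping of the object takes a reference. */
static struct shared_data *shared_open(struct shared_data *d)
{
        if (!d) {
                d = calloc(1, sizeof(*d));
                if (!d)
                        return NULL;
                atomic_init(&d->refcnt, 0);
        }
        atomic_fetch_add(&d->refcnt, 1);
        return d;
}

/* Hypothetical "close": only the caller that drops the count to zero may
 * release resources; earlier closers must leave them for the remaining
 * sharers. */
static void shared_close(struct shared_data *d)
{
        if (atomic_fetch_sub(&d->refcnt, 1) != 1)
                return;                 /* other mappings still in use */

        /* Last reference gone: nobody else can reach the structure now,
         * so it can be cleaned up without taking a lock. */
        printf("releasing shared state\n");
        free(d);
}

int main(void)
{
        struct shared_data *d = shared_open(NULL);      /* first mapping */
        shared_open(d);                                 /* a second sharer */
        shared_close(d);                                /* must not free yet */
        shared_close(d);                                /* last close frees */
        return 0;
}

That ordering is also why the updated mspec_close() no longer needs the old spin_lock/spin_unlock dance around mspec_zero_block(): once atomic_dec_and_test() returns true, no other vma references the vma_data, so the page walk cannot race with another closer.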