Commit ebb3c4c4 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: fix the mlock accounting

Reading through the code I saw I forgot to finish the mlock accounting.
Do so now.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094517.899767331@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f6c7d5fe
@@ -1461,13 +1461,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
 				      &counter->mmap_mutex)) {
+		vma->vm_mm->locked_vm -= counter->data->nr_pages + 1;
 		perf_mmap_data_free(counter);
 		mutex_unlock(&counter->mmap_mutex);
 	}
 }
 
 static struct vm_operations_struct perf_mmap_vmops = {
 	.open  = perf_mmap_open,
 	.close = perf_mmap_close,
 	.fault = perf_mmap_fault,
 };
@@ -1499,24 +1500,32 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	if (vma->vm_pgoff != 0)
 		return -EINVAL;
 
-	locked = vma_size >> PAGE_SHIFT;
-	locked += vma->vm_mm->locked_vm;
+	mutex_lock(&counter->mmap_mutex);
+	if (atomic_inc_not_zero(&counter->mmap_count)) {
+		if (nr_pages != counter->data->nr_pages)
+			ret = -EINVAL;
+		goto unlock;
+	}
+
+	locked = vma->vm_mm->locked_vm;
+	locked += nr_pages + 1;
 
 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
 	lock_limit >>= PAGE_SHIFT;
 
-	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK))
-		return -EPERM;
-
-	mutex_lock(&counter->mmap_mutex);
-	if (atomic_inc_not_zero(&counter->mmap_count))
-		goto out;
+	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+		ret = -EPERM;
+		goto unlock;
+	}
 
 	WARN_ON(counter->data);
 	ret = perf_mmap_data_alloc(counter, nr_pages);
-	if (!ret)
-		atomic_set(&counter->mmap_count, 1);
-out:
+	if (ret)
+		goto unlock;
+
+	atomic_set(&counter->mmap_count, 1);
+	vma->vm_mm->locked_vm += nr_pages + 1;
+unlock:
 	mutex_unlock(&counter->mmap_mutex);
 
 	vma->vm_flags &= ~VM_MAYWRITE;
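For illustration only, the accounting scheme this patch completes can be modelled as a small userspace C sketch: the first mmap of the buffer charges nr_pages data pages plus one control page (the header page at the start of the mapping) against the mm's locked_vm and the RLIMIT_MEMLOCK budget, and the last close undoes the charge. The names perf_buf, buf_mmap() and buf_close() are hypothetical, and the CAP_IPC_LOCK bypass present in the real check is omitted here.

/*
 * Minimal userspace model of the perf mmap buffer mlock accounting.
 * The struct and function names are illustrative, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

struct perf_buf {
	int mmap_count;            /* live mmap references to the buffer */
	unsigned long nr_pages;    /* data pages backing the buffer */
	unsigned long *locked_vm;  /* the owning mm's locked-page counter */
};

/*
 * Charge nr_pages data pages plus one control page against the
 * RLIMIT_MEMLOCK budget, mirroring what perf_mmap() now does on the
 * first mapping. (CAP_IPC_LOCK bypass not modelled; RLIM_INFINITY is
 * effectively "no limit" after the division.)
 */
static bool buf_mmap(struct perf_buf *buf)
{
	struct rlimit rl;
	unsigned long lock_limit, locked;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0)
		return false;
	lock_limit = rl.rlim_cur / (unsigned long)sysconf(_SC_PAGESIZE);

	locked = *buf->locked_vm + buf->nr_pages + 1;
	if (locked > lock_limit)
		return false;      /* the kernel returns -EPERM here */

	*buf->locked_vm += buf->nr_pages + 1;
	buf->mmap_count++;
	return true;
}

/*
 * Undo the charge when the last mapping is torn down, mirroring the
 * new locked_vm decrement in perf_mmap_close().
 */
static void buf_close(struct perf_buf *buf)
{
	if (--buf->mmap_count == 0)
		*buf->locked_vm -= buf->nr_pages + 1;
}

int main(void)
{
	unsigned long locked_vm = 0;
	struct perf_buf buf = { .nr_pages = 4, .locked_vm = &locked_vm };

	if (buf_mmap(&buf))
		printf("after mmap:  locked_vm = %lu pages\n", locked_vm);
	buf_close(&buf);
	printf("after close: locked_vm = %lu pages\n", locked_vm);
	return 0;
}

The point of the patch is the symmetry shown above: before it, perf_mmap() checked the limit but never added the pages to locked_vm, and perf_mmap_close() never subtracted them.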