Commit c4c84f06 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

fs/proc/task_mmu: stop using linked list and highest_vm_end

Remove the references to the mm_struct linked list and to highest_vm_end, in
preparation for their removal.

Link: https://lkml.kernel.org/r/20220906194824.2110408-44-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5f14b924
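For orientation before the diff: below is a minimal sketch (not part of the patch) contrasting the old mm->mmap/vm_next linked-list walk with the maple-tree based VMA iterator that the hunks switch to. VMA_ITERATOR()/for_each_vma() are used here only for brevity, and process_vma() is a made-up placeholder; the patch itself open-codes vma_iter_init()/vma_next() and MA_STATE()/mas_find().

/* Illustrative sketch only, not part of this commit. */
#include <linux/mm.h>

static void process_vma(struct vm_area_struct *vma)
{
	/* placeholder for per-VMA work */
}

static void walk_vmas_old(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	/* Old style: follow the mm_struct linked list (being removed). */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		process_vma(vma);
}

static void walk_vmas_new(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);	/* start the walk at address 0 */
	struct vm_area_struct *vma;

	/* New style: walk the maple tree via the VMA iterator. */
	for_each_vma(vmi, vma)
		process_vma(vma);
}

In the seq_file hooks changed below, *ppos doubles as the resume address: proc_get_vma() stores the next VMA's vm_start, -2UL marks the gate VMA, and -1UL marks end-of-walk, which is why m_start() and m_next() check those sentinels.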
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -285,7 +285,7 @@ struct proc_maps_private {
 	struct task_struct *task;
 	struct mm_struct *mm;
 #ifdef CONFIG_MMU
-	struct vm_area_struct *tail_vma;
+	struct vma_iterator iter;
 #endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *task_mempolicy;
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -123,12 +123,26 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
 }
 #endif
 
+static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
+						loff_t *ppos)
+{
+	struct vm_area_struct *vma = vma_next(&priv->iter);
+
+	if (vma) {
+		*ppos = vma->vm_start;
+	} else {
+		*ppos = -2UL;
+		vma = get_gate_vma(priv->mm);
+	}
+
+	return vma;
+}
+
 static void *m_start(struct seq_file *m, loff_t *ppos)
 {
 	struct proc_maps_private *priv = m->private;
 	unsigned long last_addr = *ppos;
 	struct mm_struct *mm;
-	struct vm_area_struct *vma;
 
 	/* See m_next(). Zero at the start or after lseek. */
 	if (last_addr == -1UL)
@@ -152,31 +166,21 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 		return ERR_PTR(-EINTR);
 	}
 
+	vma_iter_init(&priv->iter, mm, last_addr);
 	hold_task_mempolicy(priv);
-	priv->tail_vma = get_gate_vma(mm);
+	if (last_addr == -2UL)
+		return get_gate_vma(mm);
 
-	vma = find_vma(mm, last_addr);
-	if (vma)
-		return vma;
-
-	return priv->tail_vma;
+	return proc_get_vma(priv, ppos);
 }
 
 static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 {
-	struct proc_maps_private *priv = m->private;
-	struct vm_area_struct *next, *vma = v;
-
-	if (vma == priv->tail_vma)
-		next = NULL;
-	else if (vma->vm_next)
-		next = vma->vm_next;
-	else
-		next = priv->tail_vma;
-
-	*ppos = next ? next->vm_start : -1UL;
-
-	return next;
+	if (*ppos == -2UL) {
+		*ppos = -1UL;
+		return NULL;
+	}
+	return proc_get_vma(m->private, ppos);
 }
 
 static void m_stop(struct seq_file *m, void *v)
@@ -876,16 +880,16 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 {
 	struct proc_maps_private *priv = m->private;
 	struct mem_size_stats mss;
-	struct mm_struct *mm;
+	struct mm_struct *mm = priv->mm;
 	struct vm_area_struct *vma;
-	unsigned long last_vma_end = 0;
+	unsigned long vma_start = 0, last_vma_end = 0;
 	int ret = 0;
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
 
 	priv->task = get_proc_task(priv->inode);
 	if (!priv->task)
 		return -ESRCH;
 
-	mm = priv->mm;
 	if (!mm || !mmget_not_zero(mm)) {
 		ret = -ESRCH;
 		goto out_put_task;
@@ -898,8 +902,13 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 		goto out_put_mm;
 
 	hold_task_mempolicy(priv);
+	vma = mas_find(&mas, 0);
+
+	if (unlikely(!vma))
+		goto empty_set;
 
-	for (vma = priv->mm->mmap; vma;) {
+	vma_start = vma->vm_start;
+	do {
 		smap_gather_stats(vma, &mss, 0);
 		last_vma_end = vma->vm_end;
 
@@ -908,6 +917,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 		 * access it for write request.
 		 */
 		if (mmap_lock_is_contended(mm)) {
+			mas_pause(&mas);
 			mmap_read_unlock(mm);
 			ret = mmap_read_lock_killable(mm);
 			if (ret) {
@@ -951,7 +961,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
			 *    contains last_vma_end.
			 *    Iterate VMA' from last_vma_end.
			 */
-			vma = find_vma(mm, last_vma_end - 1);
+			vma = mas_find(&mas, ULONG_MAX);
			/* Case 3 above */
			if (!vma)
				break;
@@ -965,11 +975,10 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 			smap_gather_stats(vma, &mss, last_vma_end);
 		}
 		/* Case 2 above */
-		vma = vma->vm_next;
-	}
+	} while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
 
-	show_vma_header_prefix(m, priv->mm->mmap->vm_start,
-			       last_vma_end, 0, 0, 0, 0);
+empty_set:
+	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
 	seq_pad(m, ' ');
 	seq_puts(m, "[rollup]\n");
 
@@ -1262,6 +1271,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		return -ESRCH;
 	mm = get_task_mm(task);
 	if (mm) {
+		MA_STATE(mas, &mm->mm_mt, 0, 0);
 		struct mmu_notifier_range range;
 		struct clear_refs_private cp = {
 			.type = type,
@@ -1281,7 +1291,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		}
 
 		if (type == CLEAR_REFS_SOFT_DIRTY) {
-			for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			mas_for_each(&mas, vma, ULONG_MAX) {
 				if (!(vma->vm_flags & VM_SOFTDIRTY))
 					continue;
 				vma->vm_flags &= ~VM_SOFTDIRTY;
@@ -1293,8 +1303,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 						0, NULL, mm, 0, -1UL);
 			mmu_notifier_invalidate_range_start(&range);
 		}
-		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
-				&cp);
+		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
 		if (type == CLEAR_REFS_SOFT_DIRTY) {
 			mmu_notifier_invalidate_range_end(&range);
 			flush_tlb_mm(mm);
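A note on the mas_pause() added in the show_smaps_rollup() hunk above: when the mmap read lock is dropped mid-walk, the maple state must be paused so the next mas_find() re-walks the tree rather than reusing cached nodes. A minimal sketch of that pattern, assuming the same mas_* and mmap_lock API used in the diff; walk_with_contention() and the per-VMA work are illustrative, not kernel functions, and error handling is trimmed.

/* Illustrative sketch only, not part of this commit. */
#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/maple_tree.h>

static int walk_with_contention(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	MA_STATE(mas, &mm->mm_mt, 0, 0);
	int ret;

	mmap_read_lock(mm);
	mas_for_each(&mas, vma, ULONG_MAX) {
		/* ... per-VMA work under the read lock ... */

		if (mmap_lock_is_contended(mm)) {
			mas_pause(&mas);	/* drop cached tree position */
			mmap_read_unlock(mm);	/* let the waiting writer in */
			ret = mmap_read_lock_killable(mm);
			if (ret)
				return ret;	/* interrupted by a fatal signal */
			/* mas_for_each() resumes from the last index. */
		}
	}
	mmap_read_unlock(mm);
	return 0;
}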