Commit d9fa0e37 authored by Matthew Wilcox (Oracle); committed by Andrew Morton

cxl: remove vma linked list walk

Use the VMA iterator instead.  This requires a little restructuring of the
surrounding code to hoist the mm to the caller.  That turns
cxl_prefault_one() into a trivial function, so call cxl_fault_segment()
directly.

Link: https://lkml.kernel.org/r/20220906194824.2110408-38-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 49c40fb4
...@@ -280,22 +280,6 @@ void cxl_handle_fault(struct work_struct *fault_work) ...@@ -280,22 +280,6 @@ void cxl_handle_fault(struct work_struct *fault_work)
mmput(mm); mmput(mm);
} }
/*
 * cxl_prefault_one() - pre-fault the single segment containing @ea.
 *
 * Grabs a reference on the context's mm via get_mem_context(); on failure
 * it only logs at pr_devel level and returns (best-effort prefault, not an
 * error path).  Otherwise faults in the one segment covering @ea through
 * cxl_fault_segment() and drops the mm reference.
 *
 * NOTE(review): this function is being removed by this commit; its caller
 * now hoists the mm lookup and calls cxl_fault_segment() directly.
 */
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
struct mm_struct *mm;
/* Takes a reference on mm; must be balanced by mmput() below. */
mm = get_mem_context(ctx);
if (mm == NULL) {
pr_devel("cxl_prefault_one unable to get mm %i\n",
pid_nr(ctx->pid));
return;
}
cxl_fault_segment(ctx, mm, ea);
mmput(mm);
}
static u64 next_segment(u64 ea, u64 vsid) static u64 next_segment(u64 ea, u64 vsid)
{ {
if (vsid & SLB_VSID_B_1T) if (vsid & SLB_VSID_B_1T)
...@@ -306,23 +290,16 @@ static u64 next_segment(u64 ea, u64 vsid) ...@@ -306,23 +290,16 @@ static u64 next_segment(u64 ea, u64 vsid)
return ea + 1; return ea + 1;
} }
static void cxl_prefault_vma(struct cxl_context *ctx) static void cxl_prefault_vma(struct cxl_context *ctx, struct mm_struct *mm)
{ {
u64 ea, last_esid = 0; u64 ea, last_esid = 0;
struct copro_slb slb; struct copro_slb slb;
VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma; struct vm_area_struct *vma;
int rc; int rc;
struct mm_struct *mm;
mm = get_mem_context(ctx);
if (mm == NULL) {
pr_devel("cxl_prefault_vm unable to get mm %i\n",
pid_nr(ctx->pid));
return;
}
mmap_read_lock(mm); mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) { for_each_vma(vmi, vma) {
for (ea = vma->vm_start; ea < vma->vm_end; for (ea = vma->vm_start; ea < vma->vm_end;
ea = next_segment(ea, slb.vsid)) { ea = next_segment(ea, slb.vsid)) {
rc = copro_calculate_slb(mm, ea, &slb); rc = copro_calculate_slb(mm, ea, &slb);
...@@ -337,20 +314,28 @@ static void cxl_prefault_vma(struct cxl_context *ctx) ...@@ -337,20 +314,28 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
} }
} }
mmap_read_unlock(mm); mmap_read_unlock(mm);
mmput(mm);
} }
/*
 * cxl_prefault() - pre-fault segments for a context before AFU use.
 *
 * Defect fixed here: the side-by-side diff extraction fused the old and new
 * columns of every unchanged line into one line; this is the clean
 * post-commit (new) version reconstructed from the right-hand column.
 *
 * Takes a reference on the context's mm once, up front, and passes it down
 * (the commit hoisted the mm lookup out of cxl_prefault_one()/
 * cxl_prefault_vma() into this caller).  If the mm cannot be obtained this
 * is logged at pr_devel level only — prefaulting is best-effort.
 *
 * Modes:
 *   CXL_PREFAULT_WED - fault in just the segment containing @wed.
 *   CXL_PREFAULT_ALL - walk every VMA and fault in all their segments.
 *   anything else    - no prefaulting.
 */
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
	struct mm_struct *mm = get_mem_context(ctx);

	if (mm == NULL) {
		pr_devel("cxl_prefault unable to get mm %i\n",
			 pid_nr(ctx->pid));
		return;
	}

	switch (ctx->afu->prefault_mode) {
	case CXL_PREFAULT_WED:
		cxl_fault_segment(ctx, mm, wed);
		break;
	case CXL_PREFAULT_ALL:
		cxl_prefault_vma(ctx, mm);
		break;
	default:
		break;
	}

	/* Balances the get_mem_context() reference taken above. */
	mmput(mm);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment