Commit ece8ea7b authored by Jason Gunthorpe, committed by Doug Ledford

RDMA/usnic: Do not use ucontext->tgid

Update this driver to match the code it copies from umem.c, which no longer
uses the tgid.
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent d4b4dd1b
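The crux of the change: instead of resolving the owning process from
ucontext->tgid at release time, the driver now records current->mm when the
pages are pinned, takes a counted reference on it with mmgrab(), and drops it
with mmdrop() when the registration goes away. Below is a minimal sketch of
that pattern, not the driver code itself; my_reg, my_pin_account and
my_unpin_account are hypothetical names, and it uses the pre-5.8 mmap_sem
naming that this kernel uses.

#include <linux/sched.h>	/* current */
#include <linux/sched/mm.h>	/* mmgrab(), mmdrop() */
#include <linux/mm_types.h>	/* struct mm_struct */
#include <linux/rwsem.h>	/* down_write(), up_write() */

struct my_reg {
	struct mm_struct *owning_mm;	/* mm that pinned the pages */
	unsigned long npages;		/* pages accounted in pinned_vm */
};

static void my_pin_account(struct my_reg *reg, unsigned long npages)
{
	reg->owning_mm = current->mm;
	reg->npages = npages;
	/*
	 * Pin the mm_struct itself (not the address space, which mmget()
	 * would pin), so pinned_vm can still be corrected even after the
	 * owning task has exited.
	 */
	mmgrab(reg->owning_mm);
}

static void my_unpin_account(struct my_reg *reg)
{
	down_write(&reg->owning_mm->mmap_sem);
	reg->owning_mm->pinned_vm -= reg->npages;
	up_write(&reg->owning_mm->mmap_sem);
	mmdrop(reg->owning_mm);
}

Because mmgrab() pins only the mm_struct and not the address space, a
registration that outlives its task does not keep the task's memory alive,
and release no longer needs to look the task up by tgid at all.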
drivers/infiniband/hw/usnic/usnic_uiom.c

@@ -54,18 +54,6 @@ static struct workqueue_struct *usnic_uiom_wq;
 	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
 	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
 
-static void usnic_uiom_reg_account(struct work_struct *work)
-{
-	struct usnic_uiom_reg *umem = container_of(work,
-						struct usnic_uiom_reg, work);
-
-	down_write(&umem->mm->mmap_sem);
-	umem->mm->locked_vm -= umem->diff;
-	up_write(&umem->mm->mmap_sem);
-	mmput(umem->mm);
-	kfree(umem);
-}
-
 static int usnic_uiom_dma_fault(struct iommu_domain *domain,
 				struct device *dev,
 				unsigned long iova, int flags,
@@ -99,8 +87,9 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
 }
 
 static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
-				int dmasync, struct list_head *chunk_list)
+				int dmasync, struct usnic_uiom_reg *uiomr)
 {
+	struct list_head *chunk_list = &uiomr->chunk_list;
 	struct page **page_list;
 	struct scatterlist *sg;
 	struct usnic_uiom_chunk *chunk;
@@ -114,6 +103,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	int flags;
 	dma_addr_t pa;
 	unsigned int gup_flags;
+	struct mm_struct *mm;
 
 	/*
 	 * If the combination of the addr and size requested for this memory
@@ -136,7 +126,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 
 	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
 
-	down_write(&current->mm->mmap_sem);
+	uiomr->owning_mm = mm = current->mm;
+	down_write(&mm->mmap_sem);
 
 	locked = npages + current->mm->pinned_vm;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -196,10 +187,12 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 out:
 	if (ret < 0)
 		usnic_uiom_put_pages(chunk_list, 0);
-	else
-		current->mm->pinned_vm = locked;
+	else {
+		mm->pinned_vm = locked;
+		mmgrab(uiomr->owning_mm);
+	}
 
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 	free_page((unsigned long) page_list);
 	return ret;
 }
@@ -379,7 +372,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
 	uiomr->pd = pd;
 
 	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
-					&uiomr->chunk_list);
+				   uiomr);
 	if (err) {
 		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
 				vpn_start, vpn_last, err);
@@ -426,29 +419,39 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
 out_put_pages:
 	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
 	spin_unlock(&pd->lock);
+	mmdrop(uiomr->owning_mm);
 out_free_uiomr:
 	kfree(uiomr);
 	return ERR_PTR(err);
 }
 
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
-				struct ib_ucontext *ucontext)
+static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
 {
-	struct task_struct *task;
-	struct mm_struct *mm;
-	unsigned long diff;
+	mmdrop(uiomr->owning_mm);
+	kfree(uiomr);
+}
 
-	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
+static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
+{
+	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+}
 
-	task = get_pid_task(ucontext->tgid, PIDTYPE_PID);
-	if (!task)
-		goto out;
-	mm = get_task_mm(task);
-	put_task_struct(task);
-	if (!mm)
-		goto out;
+static void usnic_uiom_release_defer(struct work_struct *work)
+{
+	struct usnic_uiom_reg *uiomr =
+		container_of(work, struct usnic_uiom_reg, work);
 
-	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+	down_write(&uiomr->owning_mm->mmap_sem);
+	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+	up_write(&uiomr->owning_mm->mmap_sem);
+
+	__usnic_uiom_release_tail(uiomr);
+}
+
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
+			    struct ib_ucontext *context)
+{
+	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
 
 	/*
 	 * We may be called with the mm's mmap_sem already held.  This
@@ -456,25 +459,21 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
 	 * the last reference to our file and calls our release
 	 * method.  If there are memory regions to destroy, we'll end
 	 * up here and not be able to take the mmap_sem.  In that case
-	 * we defer the vm_locked accounting to the system workqueue.
+	 * we defer the vm_locked accounting to a workqueue.
	 */
-	if (ucontext->closing) {
-		if (!down_write_trylock(&mm->mmap_sem)) {
-			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
-			uiomr->mm = mm;
-			uiomr->diff = diff;
-
+	if (context->closing) {
+		if (!down_write_trylock(&uiomr->owning_mm->mmap_sem)) {
+			INIT_WORK(&uiomr->work, usnic_uiom_release_defer);
 			queue_work(usnic_uiom_wq, &uiomr->work);
 			return;
 		}
-	} else
-		down_write(&mm->mmap_sem);
+	} else {
+		down_write(&uiomr->owning_mm->mmap_sem);
+	}
+	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+	up_write(&uiomr->owning_mm->mmap_sem);
 
-	mm->pinned_vm -= diff;
-	up_write(&mm->mmap_sem);
-	mmput(mm);
-out:
-	kfree(uiomr);
+	__usnic_uiom_release_tail(uiomr);
 }
 
 struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
drivers/infiniband/hw/usnic/usnic_uiom.h

@@ -71,8 +71,7 @@ struct usnic_uiom_reg {
 	int			writable;
 	struct list_head	chunk_list;
 	struct work_struct	work;
-	struct mm_struct	*mm;
-	unsigned long		diff;
+	struct mm_struct	*owning_mm;
 };
 
 struct usnic_uiom_chunk {
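The release path above keeps the driver's old trylock-or-defer shape: when
the ucontext is closing, mmap_sem may already be held by whoever dropped the
last file reference, so the pinned_vm accounting is pushed to a workqueue
rather than risking a deadlock. A sketch of that shape, under the same
hypothetical naming as the sketch above (my_wq stands in for the driver's
usnic_uiom_wq):

#include <linux/kernel.h>	/* container_of() */
#include <linux/workqueue.h>
#include <linux/sched/mm.h>	/* mmdrop() */
#include <linux/slab.h>		/* kfree() */

static struct workqueue_struct *my_wq;

struct my_reg {
	struct mm_struct *owning_mm;
	unsigned long npages;
	struct work_struct work;
};

/* Caller must hold reg->owning_mm->mmap_sem for write. */
static void my_do_unaccount(struct my_reg *reg)
{
	reg->owning_mm->pinned_vm -= reg->npages;
	up_write(&reg->owning_mm->mmap_sem);
	mmdrop(reg->owning_mm);
	kfree(reg);
}

static void my_release_defer(struct work_struct *work)
{
	struct my_reg *reg = container_of(work, struct my_reg, work);

	/* Workqueue context: safe to sleep on mmap_sem here. */
	down_write(&reg->owning_mm->mmap_sem);
	my_do_unaccount(reg);
}

static void my_release(struct my_reg *reg, bool context_closing)
{
	if (context_closing) {
		/* mmap_sem may already be held by the closer; never block. */
		if (!down_write_trylock(&reg->owning_mm->mmap_sem)) {
			INIT_WORK(&reg->work, my_release_defer);
			queue_work(my_wq, &reg->work);
			return;
		}
	} else {
		down_write(&reg->owning_mm->mmap_sem);
	}
	my_do_unaccount(reg);
}

Note that the deferred work can run after the owning task has exited; that is
exactly why the registration holds an mmgrab() reference to the mm instead of
relying on a tgid lookup that may no longer resolve.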