Commit 3d2a9d64 authored by Dennis Dalessandro's avatar Dennis Dalessandro Committed by Jason Gunthorpe

IB/hfi1: Ensure correct mm is used at all times

Two earlier bug fixes have created a security problem in the hfi1
driver. One fix aimed to solve an issue where current->mm was not valid
when closing the hfi1 cdev. It attempted to do this by saving a cached
value of the current->mm pointer at file open time. This is a problem if
another process with access to the FD calls in via write() or ioctl() to
pin pages via the hfi driver. The other fix tried to solve a use after
free by taking a reference on the mm.

To fix this correctly we use the existing cached value of the mm in the
mmu notifier. Now we can check in the insert, evict, etc. routines that
current->mm matches what the notifier was registered for. If not, then
don't allow access. The register of the mmu notifier will save the mm
pointer.

Since in do_exit() the exit_mm() is called before exit_files(), which
would call our close routine, a reference is needed on the mm. We rely on
the mmgrab done by the registration of the notifier, whereas before it was
explicit. The mmu notifier deregistration happens when the user context is
torn down, the creation of which triggered the registration.

Also of note is we do not do any explicit work to protect the interval
tree notifier. It doesn't seem that this is going to be needed since we
aren't actually doing anything with current->mm. The interval tree
notifier stuff still has a FIXME noted from a previous commit that will be
addressed in a follow on patch.

Cc: <stable@vger.kernel.org>
Fixes: e0cf75de ("IB/hfi1: Fix mm_struct use after free")
Fixes: 3faa3d9a ("IB/hfi1: Make use of mm consistent")
Link: https://lore.kernel.org/r/20201125210112.104301.51331.stgit@awfm-01.aw.intel.com
Suggested-by: default avatarJann Horn <jannh@google.com>
Reported-by: default avatarJason Gunthorpe <jgg@nvidia.com>
Reviewed-by: default avatarIra Weiny <ira.weiny@intel.com>
Reviewed-by: default avatarMike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
Signed-off-by: default avatarDennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@nvidia.com>
parent 2ed38143
/* /*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2020 Intel Corporation. * Copyright(c) 2015-2020 Intel Corporation.
* *
* This file is provided under a dual BSD/GPLv2 license. When using or * This file is provided under a dual BSD/GPLv2 license. When using or
...@@ -206,8 +207,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) ...@@ -206,8 +207,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
spin_lock_init(&fd->tid_lock); spin_lock_init(&fd->tid_lock);
spin_lock_init(&fd->invalid_lock); spin_lock_init(&fd->invalid_lock);
fd->rec_cpu_num = -1; /* no cpu affinity by default */ fd->rec_cpu_num = -1; /* no cpu affinity by default */
fd->mm = current->mm;
mmgrab(fd->mm);
fd->dd = dd; fd->dd = dd;
fp->private_data = fd; fp->private_data = fd;
return 0; return 0;
...@@ -711,7 +710,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) ...@@ -711,7 +710,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
deallocate_ctxt(uctxt); deallocate_ctxt(uctxt);
done: done:
mmdrop(fdata->mm);
if (atomic_dec_and_test(&dd->user_refcount)) if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp); complete(&dd->user_comp);
......
#ifndef _HFI1_KERNEL_H #ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H #define _HFI1_KERNEL_H
/* /*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2020 Intel Corporation. * Copyright(c) 2015-2020 Intel Corporation.
* *
* This file is provided under a dual BSD/GPLv2 license. When using or * This file is provided under a dual BSD/GPLv2 license. When using or
...@@ -1451,7 +1452,6 @@ struct hfi1_filedata { ...@@ -1451,7 +1452,6 @@ struct hfi1_filedata {
u32 invalid_tid_idx; u32 invalid_tid_idx;
/* protect invalid_tids array and invalid_tid_idx */ /* protect invalid_tids array and invalid_tid_idx */
spinlock_t invalid_lock; spinlock_t invalid_lock;
struct mm_struct *mm;
}; };
extern struct xarray hfi1_dev_table; extern struct xarray hfi1_dev_table;
......
/* /*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2016 - 2017 Intel Corporation. * Copyright(c) 2016 - 2017 Intel Corporation.
* *
* This file is provided under a dual BSD/GPLv2 license. When using or * This file is provided under a dual BSD/GPLv2 license. When using or
...@@ -48,23 +49,11 @@ ...@@ -48,23 +49,11 @@
#include <linux/rculist.h> #include <linux/rculist.h>
#include <linux/mmu_notifier.h> #include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h> #include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>
#include "mmu_rb.h" #include "mmu_rb.h"
#include "trace.h" #include "trace.h"
struct mmu_rb_handler {
struct mmu_notifier mn;
struct rb_root_cached root;
void *ops_arg;
spinlock_t lock; /* protect the RB tree */
struct mmu_rb_ops *ops;
struct mm_struct *mm;
struct list_head lru_list;
struct work_struct del_work;
struct list_head del_list;
struct workqueue_struct *wq;
};
static unsigned long mmu_node_start(struct mmu_rb_node *); static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *); static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *, static int mmu_notifier_range_start(struct mmu_notifier *,
...@@ -92,37 +81,36 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node) ...@@ -92,37 +81,36 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
return PAGE_ALIGN(node->addr + node->len) - 1; return PAGE_ALIGN(node->addr + node->len) - 1;
} }
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm, int hfi1_mmu_rb_register(void *ops_arg,
struct mmu_rb_ops *ops, struct mmu_rb_ops *ops,
struct workqueue_struct *wq, struct workqueue_struct *wq,
struct mmu_rb_handler **handler) struct mmu_rb_handler **handler)
{ {
struct mmu_rb_handler *handlr; struct mmu_rb_handler *h;
int ret; int ret;
handlr = kmalloc(sizeof(*handlr), GFP_KERNEL); h = kmalloc(sizeof(*h), GFP_KERNEL);
if (!handlr) if (!h)
return -ENOMEM; return -ENOMEM;
handlr->root = RB_ROOT_CACHED; h->root = RB_ROOT_CACHED;
handlr->ops = ops; h->ops = ops;
handlr->ops_arg = ops_arg; h->ops_arg = ops_arg;
INIT_HLIST_NODE(&handlr->mn.hlist); INIT_HLIST_NODE(&h->mn.hlist);
spin_lock_init(&handlr->lock); spin_lock_init(&h->lock);
handlr->mn.ops = &mn_opts; h->mn.ops = &mn_opts;
handlr->mm = mm; INIT_WORK(&h->del_work, handle_remove);
INIT_WORK(&handlr->del_work, handle_remove); INIT_LIST_HEAD(&h->del_list);
INIT_LIST_HEAD(&handlr->del_list); INIT_LIST_HEAD(&h->lru_list);
INIT_LIST_HEAD(&handlr->lru_list); h->wq = wq;
handlr->wq = wq;
ret = mmu_notifier_register(&h->mn, current->mm);
ret = mmu_notifier_register(&handlr->mn, handlr->mm);
if (ret) { if (ret) {
kfree(handlr); kfree(h);
return ret; return ret;
} }
*handler = handlr; *handler = h;
return 0; return 0;
} }
...@@ -134,7 +122,7 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) ...@@ -134,7 +122,7 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
struct list_head del_list; struct list_head del_list;
/* Unregister first so we don't get any more notifications. */ /* Unregister first so we don't get any more notifications. */
mmu_notifier_unregister(&handler->mn, handler->mm); mmu_notifier_unregister(&handler->mn, handler->mn.mm);
/* /*
* Make sure the wq delete handler is finished running. It will not * Make sure the wq delete handler is finished running. It will not
...@@ -166,6 +154,10 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler, ...@@ -166,6 +154,10 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
int ret = 0; int ret = 0;
trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len); trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
if (current->mm != handler->mn.mm)
return -EPERM;
spin_lock_irqsave(&handler->lock, flags); spin_lock_irqsave(&handler->lock, flags);
node = __mmu_rb_search(handler, mnode->addr, mnode->len); node = __mmu_rb_search(handler, mnode->addr, mnode->len);
if (node) { if (node) {
...@@ -180,6 +172,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler, ...@@ -180,6 +172,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
__mmu_int_rb_remove(mnode, &handler->root); __mmu_int_rb_remove(mnode, &handler->root);
list_del(&mnode->list); /* remove from LRU list */ list_del(&mnode->list); /* remove from LRU list */
} }
mnode->handler = handler;
unlock: unlock:
spin_unlock_irqrestore(&handler->lock, flags); spin_unlock_irqrestore(&handler->lock, flags);
return ret; return ret;
...@@ -217,6 +210,9 @@ bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler, ...@@ -217,6 +210,9 @@ bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
unsigned long flags; unsigned long flags;
bool ret = false; bool ret = false;
if (current->mm != handler->mn.mm)
return ret;
spin_lock_irqsave(&handler->lock, flags); spin_lock_irqsave(&handler->lock, flags);
node = __mmu_rb_search(handler, addr, len); node = __mmu_rb_search(handler, addr, len);
if (node) { if (node) {
...@@ -239,6 +235,9 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg) ...@@ -239,6 +235,9 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
unsigned long flags; unsigned long flags;
bool stop = false; bool stop = false;
if (current->mm != handler->mn.mm)
return;
INIT_LIST_HEAD(&del_list); INIT_LIST_HEAD(&del_list);
spin_lock_irqsave(&handler->lock, flags); spin_lock_irqsave(&handler->lock, flags);
...@@ -272,6 +271,9 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler, ...@@ -272,6 +271,9 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
{ {
unsigned long flags; unsigned long flags;
if (current->mm != handler->mn.mm)
return;
/* Validity of handler and node pointers has been checked by caller. */ /* Validity of handler and node pointers has been checked by caller. */
trace_hfi1_mmu_rb_remove(node->addr, node->len); trace_hfi1_mmu_rb_remove(node->addr, node->len);
spin_lock_irqsave(&handler->lock, flags); spin_lock_irqsave(&handler->lock, flags);
......
/* /*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2016 Intel Corporation. * Copyright(c) 2016 Intel Corporation.
* *
* This file is provided under a dual BSD/GPLv2 license. When using or * This file is provided under a dual BSD/GPLv2 license. When using or
...@@ -54,6 +55,7 @@ struct mmu_rb_node { ...@@ -54,6 +55,7 @@ struct mmu_rb_node {
unsigned long len; unsigned long len;
unsigned long __last; unsigned long __last;
struct rb_node node; struct rb_node node;
struct mmu_rb_handler *handler;
struct list_head list; struct list_head list;
}; };
...@@ -71,7 +73,19 @@ struct mmu_rb_ops { ...@@ -71,7 +73,19 @@ struct mmu_rb_ops {
void *evict_arg, bool *stop); void *evict_arg, bool *stop);
}; };
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm, struct mmu_rb_handler {
struct mmu_notifier mn;
struct rb_root_cached root;
void *ops_arg;
spinlock_t lock; /* protect the RB tree */
struct mmu_rb_ops *ops;
struct list_head lru_list;
struct work_struct del_work;
struct list_head del_list;
struct workqueue_struct *wq;
};
int hfi1_mmu_rb_register(void *ops_arg,
struct mmu_rb_ops *ops, struct mmu_rb_ops *ops,
struct workqueue_struct *wq, struct workqueue_struct *wq,
struct mmu_rb_handler **handler); struct mmu_rb_handler **handler);
......
/* /*
* Copyright(c) 2020 Cornelis Networks, Inc.
* Copyright(c) 2015-2018 Intel Corporation. * Copyright(c) 2015-2018 Intel Corporation.
* *
* This file is provided under a dual BSD/GPLv2 license. When using or * This file is provided under a dual BSD/GPLv2 license. When using or
...@@ -173,15 +174,18 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd, ...@@ -173,15 +174,18 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
{ {
struct page **pages; struct page **pages;
struct hfi1_devdata *dd = fd->uctxt->dd; struct hfi1_devdata *dd = fd->uctxt->dd;
struct mm_struct *mm;
if (mapped) { if (mapped) {
pci_unmap_single(dd->pcidev, node->dma_addr, pci_unmap_single(dd->pcidev, node->dma_addr,
node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE); node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
pages = &node->pages[idx]; pages = &node->pages[idx];
mm = mm_from_tid_node(node);
} else { } else {
pages = &tidbuf->pages[idx]; pages = &tidbuf->pages[idx];
mm = current->mm;
} }
hfi1_release_user_pages(fd->mm, pages, npages, mapped); hfi1_release_user_pages(mm, pages, npages, mapped);
fd->tid_n_pinned -= npages; fd->tid_n_pinned -= npages;
} }
...@@ -216,12 +220,12 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf) ...@@ -216,12 +220,12 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
* pages, accept the amount pinned so far and program only that. * pages, accept the amount pinned so far and program only that.
* User space knows how to deal with partially programmed buffers. * User space knows how to deal with partially programmed buffers.
*/ */
if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) { if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
kfree(pages); kfree(pages);
return -ENOMEM; return -ENOMEM;
} }
pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages); pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
if (pinned <= 0) { if (pinned <= 0) {
kfree(pages); kfree(pages);
return pinned; return pinned;
...@@ -756,7 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd, ...@@ -756,7 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
if (fd->use_mn) { if (fd->use_mn) {
ret = mmu_interval_notifier_insert( ret = mmu_interval_notifier_insert(
&node->notifier, fd->mm, &node->notifier, current->mm,
tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE, tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
&tid_mn_ops); &tid_mn_ops);
if (ret) if (ret)
......
#ifndef _HFI1_USER_EXP_RCV_H #ifndef _HFI1_USER_EXP_RCV_H
#define _HFI1_USER_EXP_RCV_H #define _HFI1_USER_EXP_RCV_H
/* /*
* Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2017 Intel Corporation. * Copyright(c) 2015 - 2017 Intel Corporation.
* *
* This file is provided under a dual BSD/GPLv2 license. When using or * This file is provided under a dual BSD/GPLv2 license. When using or
...@@ -95,4 +96,9 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd, ...@@ -95,4 +96,9 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd, int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
struct hfi1_tid_info *tinfo); struct hfi1_tid_info *tinfo);
static inline struct mm_struct *mm_from_tid_node(struct tid_rb_node *node)
{
return node->notifier.mm;
}
#endif /* _HFI1_USER_EXP_RCV_H */ #endif /* _HFI1_USER_EXP_RCV_H */
/* /*
* Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation. * Copyright(c) 2015 - 2018 Intel Corporation.
* *
* This file is provided under a dual BSD/GPLv2 license. When using or * This file is provided under a dual BSD/GPLv2 license. When using or
...@@ -188,7 +189,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, ...@@ -188,7 +189,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
atomic_set(&pq->n_reqs, 0); atomic_set(&pq->n_reqs, 0);
init_waitqueue_head(&pq->wait); init_waitqueue_head(&pq->wait);
atomic_set(&pq->n_locked, 0); atomic_set(&pq->n_locked, 0);
pq->mm = fd->mm;
iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue, iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
activate_packet_queue, NULL, NULL); activate_packet_queue, NULL, NULL);
...@@ -230,7 +230,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, ...@@ -230,7 +230,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
cq->nentries = hfi1_sdma_comp_ring_size; cq->nentries = hfi1_sdma_comp_ring_size;
ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq, ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
&pq->handler); &pq->handler);
if (ret) { if (ret) {
dd_dev_err(dd, "Failed to register with MMU %d", ret); dd_dev_err(dd, "Failed to register with MMU %d", ret);
...@@ -980,13 +980,13 @@ static int pin_sdma_pages(struct user_sdma_request *req, ...@@ -980,13 +980,13 @@ static int pin_sdma_pages(struct user_sdma_request *req,
npages -= node->npages; npages -= node->npages;
retry: retry:
if (!hfi1_can_pin_pages(pq->dd, pq->mm, if (!hfi1_can_pin_pages(pq->dd, current->mm,
atomic_read(&pq->n_locked), npages)) { atomic_read(&pq->n_locked), npages)) {
cleared = sdma_cache_evict(pq, npages); cleared = sdma_cache_evict(pq, npages);
if (cleared >= npages) if (cleared >= npages)
goto retry; goto retry;
} }
pinned = hfi1_acquire_user_pages(pq->mm, pinned = hfi1_acquire_user_pages(current->mm,
((unsigned long)iovec->iov.iov_base + ((unsigned long)iovec->iov.iov_base +
(node->npages * PAGE_SIZE)), npages, 0, (node->npages * PAGE_SIZE)), npages, 0,
pages + node->npages); pages + node->npages);
...@@ -995,7 +995,7 @@ static int pin_sdma_pages(struct user_sdma_request *req, ...@@ -995,7 +995,7 @@ static int pin_sdma_pages(struct user_sdma_request *req,
return pinned; return pinned;
} }
if (pinned != npages) { if (pinned != npages) {
unpin_vector_pages(pq->mm, pages, node->npages, pinned); unpin_vector_pages(current->mm, pages, node->npages, pinned);
return -EFAULT; return -EFAULT;
} }
kfree(node->pages); kfree(node->pages);
...@@ -1008,7 +1008,8 @@ static int pin_sdma_pages(struct user_sdma_request *req, ...@@ -1008,7 +1008,8 @@ static int pin_sdma_pages(struct user_sdma_request *req,
static void unpin_sdma_pages(struct sdma_mmu_node *node) static void unpin_sdma_pages(struct sdma_mmu_node *node)
{ {
if (node->npages) { if (node->npages) {
unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages); unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
node->npages);
atomic_sub(node->npages, &node->pq->n_locked); atomic_sub(node->npages, &node->pq->n_locked);
} }
} }
......
#ifndef _HFI1_USER_SDMA_H #ifndef _HFI1_USER_SDMA_H
#define _HFI1_USER_SDMA_H #define _HFI1_USER_SDMA_H
/* /*
* Copyright(c) 2020 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation. * Copyright(c) 2015 - 2018 Intel Corporation.
* *
* This file is provided under a dual BSD/GPLv2 license. When using or * This file is provided under a dual BSD/GPLv2 license. When using or
...@@ -133,7 +134,6 @@ struct hfi1_user_sdma_pkt_q { ...@@ -133,7 +134,6 @@ struct hfi1_user_sdma_pkt_q {
unsigned long unpinned; unsigned long unpinned;
struct mmu_rb_handler *handler; struct mmu_rb_handler *handler;
atomic_t n_locked; atomic_t n_locked;
struct mm_struct *mm;
}; };
struct hfi1_user_sdma_comp_q { struct hfi1_user_sdma_comp_q {
...@@ -250,4 +250,9 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, ...@@ -250,4 +250,9 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
struct iovec *iovec, unsigned long dim, struct iovec *iovec, unsigned long dim,
unsigned long *count); unsigned long *count);
static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
{
return node->rb.handler->mn.mm;
}
#endif /* _HFI1_USER_SDMA_H */ #endif /* _HFI1_USER_SDMA_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment