Commit 8974efaa authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:

 - Several hfi1 patches fixing some long-standing driver bugs

 - An overflow when working with sg lists whose elements exceed 4G

 - An rxe regression with object numbering after the MRs reach their
   limit

 - A theoretical problem with the scatterlist merging code

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  lib/scatterlist: Fix to calculate the last_pg properly
  IB/hfi1: Remove user expected buffer invalidate race
  IB/hfi1: Immediately remove invalid memory from hardware
  IB/hfi1: Fix expected receive setup error exit issues
  IB/hfi1: Reserve user expected TIDs
  IB/hfi1: Reject a zero-length user expected buffer
  RDMA/core: Fix ib block iterator counter overflow
  RDMA/rxe: Prevent faulty rkey generation
  RDMA/rxe: Fix inaccurate constants in rxe_type_info
parents edc00350 0f097f08
drivers/infiniband/core/verbs.c
@@ -2957,15 +2957,18 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
 bool __rdma_block_iter_next(struct ib_block_iter *biter)
 {
 	unsigned int block_offset;
+	unsigned int sg_delta;
 
 	if (!biter->__sg_nents || !biter->__sg)
 		return false;
 
 	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
 	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
-	biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
+	sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
 
-	if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
+	if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
+		biter->__sg_advance += sg_delta;
+	} else {
 		biter->__sg_advance = 0;
 		biter->__sg = sg_next(biter->__sg);
 		biter->__sg_nents--;
...
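Note on the overflow being fixed here: __sg_advance is an unsigned int, so once a single SG element is larger than 4G the old add-then-compare sequence could wrap and the iterator would never move to the next element. A minimal standalone sketch of that wraparound (not kernel code; the variable names only mirror the ib_block_iter fields):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* advance sitting near the 4G boundary inside one huge SG element */
	uint32_t sg_advance = UINT32_MAX - 100;
	uint32_t sg_delta = 512;          /* distance to the next block */
	uint32_t sg_dma_len = UINT32_MAX; /* element length close to 4G */

	/* old style: add first, test later -- the addition wraps to 411,
	 * so a ">= sg_dma_len" exit test would never fire */
	uint32_t wrapped = sg_advance + sg_delta;
	printf("wrapped advance: %u\n", wrapped);

	/* fixed style: test the remaining bytes against the step, which
	 * cannot overflow, and only then advance */
	if (sg_dma_len - sg_advance > sg_delta)
		printf("stay in this element, advance by %u\n", sg_delta);
	else
		printf("move to the next SG element\n");
	return 0;
}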
drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -23,18 +23,25 @@ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
 static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
 			      const struct mmu_notifier_range *range,
 			      unsigned long cur_seq);
+static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
+				 const struct mmu_notifier_range *range,
+				 unsigned long cur_seq);
 static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
 			    struct tid_group *grp,
 			    unsigned int start, u16 count,
 			    u32 *tidlist, unsigned int *tididx,
 			    unsigned int *pmapped);
-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
-			      struct tid_group **grp);
+static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo);
+static void __clear_tid_node(struct hfi1_filedata *fd,
+			     struct tid_rb_node *node);
 static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
 
 static const struct mmu_interval_notifier_ops tid_mn_ops = {
 	.invalidate = tid_rb_invalidate,
 };
+static const struct mmu_interval_notifier_ops tid_cover_ops = {
+	.invalidate = tid_cover_invalidate,
+};
 
 /*
  * Initialize context and file private data needed for Expected
@@ -253,53 +260,65 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
 		   tididx = 0, mapped, mapped_pages = 0;
 	u32 *tidlist = NULL;
 	struct tid_user_buf *tidbuf;
+	unsigned long mmu_seq = 0;
 
 	if (!PAGE_ALIGNED(tinfo->vaddr))
 		return -EINVAL;
+	if (tinfo->length == 0)
+		return -EINVAL;
 
 	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
 	if (!tidbuf)
 		return -ENOMEM;
 
+	mutex_init(&tidbuf->cover_mutex);
 	tidbuf->vaddr = tinfo->vaddr;
 	tidbuf->length = tinfo->length;
 	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
 				GFP_KERNEL);
 	if (!tidbuf->psets) {
-		kfree(tidbuf);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto fail_release_mem;
+	}
+
+	if (fd->use_mn) {
+		ret = mmu_interval_notifier_insert(
+			&tidbuf->notifier, current->mm,
+			tidbuf->vaddr, tidbuf->npages * PAGE_SIZE,
+			&tid_cover_ops);
+		if (ret)
+			goto fail_release_mem;
+		mmu_seq = mmu_interval_read_begin(&tidbuf->notifier);
 	}
 
 	pinned = pin_rcv_pages(fd, tidbuf);
 	if (pinned <= 0) {
-		kfree(tidbuf->psets);
-		kfree(tidbuf);
-		return pinned;
+		ret = (pinned < 0) ? pinned : -ENOSPC;
+		goto fail_unpin;
 	}
 
 	/* Find sets of physically contiguous pages */
 	tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
 
-	/*
-	 * We don't need to access this under a lock since tid_used is per
-	 * process and the same process cannot be in hfi1_user_exp_rcv_clear()
-	 * and hfi1_user_exp_rcv_setup() at the same time.
-	 */
+	/* Reserve the number of expected tids to be used. */
 	spin_lock(&fd->tid_lock);
 	if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
 		pageset_count = fd->tid_limit - fd->tid_used;
 	else
 		pageset_count = tidbuf->n_psets;
+	fd->tid_used += pageset_count;
 	spin_unlock(&fd->tid_lock);
 
-	if (!pageset_count)
-		goto bail;
+	if (!pageset_count) {
+		ret = -ENOSPC;
+		goto fail_unreserve;
+	}
 
 	ngroups = pageset_count / dd->rcv_entries.group_size;
 	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
 	if (!tidlist) {
 		ret = -ENOMEM;
-		goto nomem;
+		goto fail_unreserve;
 	}
 
 	tididx = 0;
@@ -395,43 +414,78 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
 	}
 unlock:
 	mutex_unlock(&uctxt->exp_mutex);
-nomem:
 	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
 		  mapped_pages, ret);
-	if (tididx) {
-		spin_lock(&fd->tid_lock);
-		fd->tid_used += tididx;
-		spin_unlock(&fd->tid_lock);
-		tinfo->tidcnt = tididx;
-		tinfo->length = mapped_pages * PAGE_SIZE;
-		if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
-				 tidlist, sizeof(tidlist[0]) * tididx)) {
-			/*
-			 * On failure to copy to the user level, we need to undo
-			 * everything done so far so we don't leak resources.
-			 */
-			tinfo->tidlist = (unsigned long)&tidlist;
-			hfi1_user_exp_rcv_clear(fd, tinfo);
-			tinfo->tidlist = 0;
-			ret = -EFAULT;
-			goto bail;
-		}
-	}
 
-	/*
-	 * If not everything was mapped (due to insufficient RcvArray entries,
-	 * for example), unpin all unmapped pages so we can pin them nex time.
-	 */
-	if (mapped_pages != pinned)
-		unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
-				(pinned - mapped_pages), false);
-bail:
-	kfree(tidbuf->psets);
-	kfree(tidlist);
-	kfree(tidbuf->pages);
-	kfree(tidbuf);
+	/* fail if nothing was programmed, set error if none provided */
+	if (tididx == 0) {
+		if (ret >= 0)
+			ret = -ENOSPC;
+		goto fail_unreserve;
+	}
 
-	return ret > 0 ? 0 : ret;
+	/* adjust reserved tid_used to actual count */
+	spin_lock(&fd->tid_lock);
+	fd->tid_used -= pageset_count - tididx;
+	spin_unlock(&fd->tid_lock);
+
+	/* unpin all pages not covered by a TID */
+	unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
+			false);
+
+	if (fd->use_mn) {
+		/* check for an invalidate during setup */
+		bool fail = false;
+
+		mutex_lock(&tidbuf->cover_mutex);
+		fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq);
+		mutex_unlock(&tidbuf->cover_mutex);
+
+		if (fail) {
+			ret = -EBUSY;
+			goto fail_unprogram;
+		}
+	}
+
+	tinfo->tidcnt = tididx;
+	tinfo->length = mapped_pages * PAGE_SIZE;
+
+	if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
+			 tidlist, sizeof(tidlist[0]) * tididx)) {
+		ret = -EFAULT;
+		goto fail_unprogram;
+	}
+
+	if (fd->use_mn)
+		mmu_interval_notifier_remove(&tidbuf->notifier);
+	kfree(tidbuf->pages);
+	kfree(tidbuf->psets);
+	kfree(tidbuf);
+	kfree(tidlist);
+	return 0;
+
+fail_unprogram:
+	/* unprogram, unmap, and unpin all allocated TIDs */
+	tinfo->tidlist = (unsigned long)tidlist;
+	hfi1_user_exp_rcv_clear(fd, tinfo);
+	tinfo->tidlist = 0;
+	pinned = 0;		/* nothing left to unpin */
+	pageset_count = 0;	/* nothing left reserved */
+fail_unreserve:
+	spin_lock(&fd->tid_lock);
+	fd->tid_used -= pageset_count;
+	spin_unlock(&fd->tid_lock);
+fail_unpin:
+	if (fd->use_mn)
+		mmu_interval_notifier_remove(&tidbuf->notifier);
+	if (pinned > 0)
+		unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
+fail_release_mem:
+	kfree(tidbuf->pages);
+	kfree(tidbuf->psets);
+	kfree(tidbuf);
+	kfree(tidlist);
+	return ret;
 }
@@ -452,7 +506,7 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
 	mutex_lock(&uctxt->exp_mutex);
 	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
-		ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
+		ret = unprogram_rcvarray(fd, tidinfo[tididx]);
 		if (ret) {
 			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
 				  ret);
@@ -706,6 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
 	}
 
 	node->fdata = fd;
+	mutex_init(&node->invalidate_mutex);
 	node->phys = page_to_phys(pages[0]);
 	node->npages = npages;
 	node->rcventry = rcventry;
...@@ -721,11 +776,6 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd, ...@@ -721,11 +776,6 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
&tid_mn_ops); &tid_mn_ops);
if (ret) if (ret)
goto out_unmap; goto out_unmap;
/*
* FIXME: This is in the wrong order, the notifier should be
* established before the pages are pinned by pin_rcv_pages.
*/
mmu_interval_read_begin(&node->notifier);
} }
fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node; fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
@@ -745,8 +795,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
 	return -EFAULT;
 }
 
-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
-			      struct tid_group **grp)
+static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo)
 {
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
 	struct hfi1_devdata *dd = uctxt->dd;
@@ -769,9 +818,6 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
 	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
 		return -EBADF;
 
-	if (grp)
-		*grp = node->grp;
-
 	if (fd->use_mn)
 		mmu_interval_notifier_remove(&node->notifier);
 	cacheless_tid_rb_remove(fd, node);
@@ -779,23 +825,34 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
 	return 0;
 }
 
-static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
+static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
 {
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
 	struct hfi1_devdata *dd = uctxt->dd;
 
+	mutex_lock(&node->invalidate_mutex);
+	if (node->freed)
+		goto done;
+	node->freed = true;
+
 	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
 				 node->npages,
 				 node->notifier.interval_tree.start, node->phys,
 				 node->dma_addr);
 
-	/*
-	 * Make sure device has seen the write before we unpin the
-	 * pages.
-	 */
+	/* Make sure device has seen the write before pages are unpinned */
 	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);
 
 	unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);
+done:
+	mutex_unlock(&node->invalidate_mutex);
+}
+
+static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
+{
+	struct hfi1_ctxtdata *uctxt = fd->uctxt;
+
+	__clear_tid_node(fd, node);
 
 	node->grp->used--;
 	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
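The new invalidate_mutex plus the freed flag make hardware removal idempotent: the mmu-notifier path and the normal teardown path may both reach __clear_tid_node(), and whichever arrives second becomes a no-op. A userspace toy of the same pattern (hypothetical names, pthreads standing in for the kernel mutex):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	pthread_mutex_t invalidate_mutex;
	bool freed;
};

static void clear_hw_entry(struct node *n)
{
	pthread_mutex_lock(&n->invalidate_mutex);
	if (n->freed)
		goto done;	/* the other path already did the work */
	n->freed = true;
	puts("rcvarray entry invalidated, pages unpinned");	/* runs once */
done:
	pthread_mutex_unlock(&n->invalidate_mutex);
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, false };

	clear_hw_entry(&n);	/* e.g. the notifier path */
	clear_hw_entry(&n);	/* e.g. clear_tid_node(): no-op */
	return 0;
}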
@@ -854,10 +911,16 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
 	if (node->freed)
 		return true;
 
+	/* take action only if unmapping */
+	if (range->event != MMU_NOTIFY_UNMAP)
+		return true;
+
 	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
 				 node->notifier.interval_tree.start,
 				 node->rcventry, node->npages, node->dma_addr);
-	node->freed = true;
+
+	/* clear the hardware rcvarray entry */
+	__clear_tid_node(fdata, node);
 
 	spin_lock(&fdata->invalid_lock);
 	if (fdata->invalid_tid_idx < uctxt->expected_count) {
@@ -887,6 +950,23 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
 	return true;
 }
 
+static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
+				 const struct mmu_notifier_range *range,
+				 unsigned long cur_seq)
+{
+	struct tid_user_buf *tidbuf =
+		container_of(mni, struct tid_user_buf, notifier);
+
+	/* take action only if unmapping */
+	if (range->event == MMU_NOTIFY_UNMAP) {
+		mutex_lock(&tidbuf->cover_mutex);
+		mmu_interval_set_seq(mni, cur_seq);
+		mutex_unlock(&tidbuf->cover_mutex);
+	}
+
+	return true;
+}
+
 static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
 				    struct tid_rb_node *tnode)
 {
...
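The cover notifier added above exists so hfi1_user_exp_rcv_setup() can detect an unmap that raced with page pinning: it samples a sequence number before pinning and re-checks it after the TIDs are programmed. A single-threaded toy reduction of that begin/retry protocol (stand-in helpers; the real primitives are mmu_interval_read_begin(), mmu_interval_read_retry() and mmu_interval_set_seq()):

#include <stdbool.h>
#include <stdio.h>

static unsigned long interval_seq;	/* bumped by invalidations */

static unsigned long read_begin(void)	/* ~mmu_interval_read_begin */
{
	return interval_seq;
}

static bool read_retry(unsigned long seq)	/* ~mmu_interval_read_retry */
{
	return interval_seq != seq;	/* true => setup raced, must fail */
}

static void invalidate(void)		/* ~tid_cover_invalidate */
{
	interval_seq++;
}

int main(void)
{
	unsigned long seq = read_begin();	/* before pinning pages */

	invalidate();				/* an unmap races with setup */

	if (read_retry(seq))			/* after programming TIDs */
		puts("invalidate raced with setup: return -EBUSY");
	return 0;
}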
drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -16,6 +16,8 @@ struct tid_pageset {
 };
 
 struct tid_user_buf {
+	struct mmu_interval_notifier notifier;
+	struct mutex cover_mutex;
 	unsigned long vaddr;
 	unsigned long length;
 	unsigned int npages;
@@ -27,6 +29,7 @@ struct tid_user_buf {
 struct tid_rb_node {
 	struct mmu_interval_notifier notifier;
 	struct hfi1_filedata *fdata;
+	struct mutex invalidate_mutex; /* covers hw removal */
 	unsigned long phys;
 	struct tid_group *grp;
 	u32 rcventry;
...
drivers/infiniband/sw/rxe/rxe_param.h
@@ -98,11 +98,11 @@ enum rxe_device_param {
 	RXE_MAX_SRQ		= DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX,
 
 	RXE_MIN_MR_INDEX	= 0x00000001,
-	RXE_MAX_MR_INDEX	= DEFAULT_MAX_VALUE,
-	RXE_MAX_MR		= DEFAULT_MAX_VALUE - RXE_MIN_MR_INDEX,
-	RXE_MIN_MW_INDEX	= 0x00010001,
-	RXE_MAX_MW_INDEX	= 0x00020000,
-	RXE_MAX_MW		= 0x00001000,
+	RXE_MAX_MR_INDEX	= DEFAULT_MAX_VALUE >> 1,
+	RXE_MAX_MR		= RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX,
+	RXE_MIN_MW_INDEX	= RXE_MAX_MR_INDEX + 1,
+	RXE_MAX_MW_INDEX	= DEFAULT_MAX_VALUE,
+	RXE_MAX_MW		= RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX,
 
 	RXE_MAX_PKT_PER_ACK	= 64,
...
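The rkey bug came from MR and MW indices sharing overlapping ranges, so a freshly generated key could alias an object of the other type; the fix carves the index space in half. A sketch of the resulting disjoint ranges, assuming DEFAULT_MAX_VALUE is 0x00ffffff purely for illustration (the real constant is defined elsewhere in rxe_param.h):

#include <stdio.h>

#define DEFAULT_MAX_VALUE 0x00ffffff	/* assumed for this sketch */

enum {
	RXE_MIN_MR_INDEX = 0x00000001,
	RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE >> 1,
	RXE_MIN_MW_INDEX = RXE_MAX_MR_INDEX + 1,
	RXE_MAX_MW_INDEX = DEFAULT_MAX_VALUE,
};

int main(void)
{
	/* the ranges no longer overlap, so an index can never be read
	 * back as both an MR key and an MW key */
	printf("MR indices: [0x%08x, 0x%08x]\n",
	       RXE_MIN_MR_INDEX, RXE_MAX_MR_INDEX);
	printf("MW indices: [0x%08x, 0x%08x]\n",
	       RXE_MIN_MW_INDEX, RXE_MAX_MW_INDEX);
	return 0;
}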
...@@ -23,16 +23,16 @@ static const struct rxe_type_info { ...@@ -23,16 +23,16 @@ static const struct rxe_type_info {
.size = sizeof(struct rxe_ucontext), .size = sizeof(struct rxe_ucontext),
.elem_offset = offsetof(struct rxe_ucontext, elem), .elem_offset = offsetof(struct rxe_ucontext, elem),
.min_index = 1, .min_index = 1,
.max_index = UINT_MAX, .max_index = RXE_MAX_UCONTEXT,
.max_elem = UINT_MAX, .max_elem = RXE_MAX_UCONTEXT,
}, },
[RXE_TYPE_PD] = { [RXE_TYPE_PD] = {
.name = "pd", .name = "pd",
.size = sizeof(struct rxe_pd), .size = sizeof(struct rxe_pd),
.elem_offset = offsetof(struct rxe_pd, elem), .elem_offset = offsetof(struct rxe_pd, elem),
.min_index = 1, .min_index = 1,
.max_index = UINT_MAX, .max_index = RXE_MAX_PD,
.max_elem = UINT_MAX, .max_elem = RXE_MAX_PD,
}, },
[RXE_TYPE_AH] = { [RXE_TYPE_AH] = {
.name = "ah", .name = "ah",
...@@ -40,7 +40,7 @@ static const struct rxe_type_info { ...@@ -40,7 +40,7 @@ static const struct rxe_type_info {
.elem_offset = offsetof(struct rxe_ah, elem), .elem_offset = offsetof(struct rxe_ah, elem),
.min_index = RXE_MIN_AH_INDEX, .min_index = RXE_MIN_AH_INDEX,
.max_index = RXE_MAX_AH_INDEX, .max_index = RXE_MAX_AH_INDEX,
.max_elem = RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1, .max_elem = RXE_MAX_AH,
}, },
[RXE_TYPE_SRQ] = { [RXE_TYPE_SRQ] = {
.name = "srq", .name = "srq",
...@@ -49,7 +49,7 @@ static const struct rxe_type_info { ...@@ -49,7 +49,7 @@ static const struct rxe_type_info {
.cleanup = rxe_srq_cleanup, .cleanup = rxe_srq_cleanup,
.min_index = RXE_MIN_SRQ_INDEX, .min_index = RXE_MIN_SRQ_INDEX,
.max_index = RXE_MAX_SRQ_INDEX, .max_index = RXE_MAX_SRQ_INDEX,
.max_elem = RXE_MAX_SRQ_INDEX - RXE_MIN_SRQ_INDEX + 1, .max_elem = RXE_MAX_SRQ,
}, },
[RXE_TYPE_QP] = { [RXE_TYPE_QP] = {
.name = "qp", .name = "qp",
...@@ -58,7 +58,7 @@ static const struct rxe_type_info { ...@@ -58,7 +58,7 @@ static const struct rxe_type_info {
.cleanup = rxe_qp_cleanup, .cleanup = rxe_qp_cleanup,
.min_index = RXE_MIN_QP_INDEX, .min_index = RXE_MIN_QP_INDEX,
.max_index = RXE_MAX_QP_INDEX, .max_index = RXE_MAX_QP_INDEX,
.max_elem = RXE_MAX_QP_INDEX - RXE_MIN_QP_INDEX + 1, .max_elem = RXE_MAX_QP,
}, },
[RXE_TYPE_CQ] = { [RXE_TYPE_CQ] = {
.name = "cq", .name = "cq",
...@@ -66,8 +66,8 @@ static const struct rxe_type_info { ...@@ -66,8 +66,8 @@ static const struct rxe_type_info {
.elem_offset = offsetof(struct rxe_cq, elem), .elem_offset = offsetof(struct rxe_cq, elem),
.cleanup = rxe_cq_cleanup, .cleanup = rxe_cq_cleanup,
.min_index = 1, .min_index = 1,
.max_index = UINT_MAX, .max_index = RXE_MAX_CQ,
.max_elem = UINT_MAX, .max_elem = RXE_MAX_CQ,
}, },
[RXE_TYPE_MR] = { [RXE_TYPE_MR] = {
.name = "mr", .name = "mr",
...@@ -76,7 +76,7 @@ static const struct rxe_type_info { ...@@ -76,7 +76,7 @@ static const struct rxe_type_info {
.cleanup = rxe_mr_cleanup, .cleanup = rxe_mr_cleanup,
.min_index = RXE_MIN_MR_INDEX, .min_index = RXE_MIN_MR_INDEX,
.max_index = RXE_MAX_MR_INDEX, .max_index = RXE_MAX_MR_INDEX,
.max_elem = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX + 1, .max_elem = RXE_MAX_MR,
}, },
[RXE_TYPE_MW] = { [RXE_TYPE_MW] = {
.name = "mw", .name = "mw",
...@@ -85,7 +85,7 @@ static const struct rxe_type_info { ...@@ -85,7 +85,7 @@ static const struct rxe_type_info {
.cleanup = rxe_mw_cleanup, .cleanup = rxe_mw_cleanup,
.min_index = RXE_MIN_MW_INDEX, .min_index = RXE_MIN_MW_INDEX,
.max_index = RXE_MAX_MW_INDEX, .max_index = RXE_MAX_MW_INDEX,
.max_elem = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX + 1, .max_elem = RXE_MAX_MW,
}, },
}; };
......
lib/scatterlist.c
@@ -470,12 +470,16 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
 		return -EOPNOTSUPP;
 
 	if (sgt_append->prv) {
+		unsigned long next_pfn = (page_to_phys(sg_page(sgt_append->prv)) +
+			sgt_append->prv->offset + sgt_append->prv->length) / PAGE_SIZE;
+
 		if (WARN_ON(offset))
 			return -EINVAL;
 
 		/* Merge contiguous pages into the last SG */
 		prv_len = sgt_append->prv->length;
-		last_pg = sg_page(sgt_append->prv);
+		if (page_to_pfn(pages[0]) == next_pfn) {
+			last_pg = pfn_to_page(next_pfn - 1);
 		while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
 			if (sgt_append->prv->length + PAGE_SIZE > max_segment)
 				break;
@@ -487,6 +491,7 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
 		if (!n_pages)
 			goto out;
 		}
+	}
 
 	/* compute number of contiguous chunks */
 	chunks = 1;
...
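The last_pg bug: sg_page(prv) returns the first page of the previous entry, so whenever that entry spans several pages the merge test compared the new page against the wrong one. Worked arithmetic under an assumed 4 KiB page size (plain numbers, not kernel code):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long prv_first_pfn = 100;	/* pfn of sg_page(prv) */
	unsigned long prv_offset = 0;
	unsigned long prv_length = 3 * PAGE_SIZE; /* entry covers pfns 100..102 */

	/* pfn one past the end of the previous entry, as in the fix */
	unsigned long next_pfn = (prv_first_pfn * PAGE_SIZE +
				  prv_offset + prv_length) / PAGE_SIZE;

	/* a new page continues the entry only if it starts at pfn 103,
	 * and contiguity must be tested against pfn 102 (the true last
	 * page), not pfn 100 (the entry's first page) */
	printf("merge candidate pfn: %lu, last page of prv: %lu\n",
	       next_pfn, next_pfn - 1);
	return 0;
}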