Commit 895420dd authored by Dennis Dalessandro, committed by Doug Ledford

staging/rdma/hfi1: Remove hfi1 MR and hfi1 specific qp type

This patch does the actual removal of the queue pair type from the hfi1
driver, along with a number of dependent data structures; these have been
moved to rvt.

It also removes the hfi1 MR functions, using those in rdmavt instead.

These two pieces cannot reasonably be split apart because they depend on
each other.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 8f1764fa
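For context on the pattern used throughout this diff: the shared struct rvt_qp now lives in rdmavt, and the driver keeps its hardware-specific state behind the qp->priv pointer (visible below as struct hfi1_qp_priv *priv = qp->priv). A minimal sketch of that split, using a made-up private struct rather than hfi1's real one (the include path is as in current mainline rdmavt):

    /* Sketch only: my_qp_priv stands in for the driver's real
     * struct hfi1_qp_priv; rdmavt supplies struct rvt_qp.
     */
    #include <rdma/rdmavt_qp.h>

    struct ahg_ib_header;			/* driver-private, fwd decl */

    struct my_qp_priv {
    	struct ahg_ib_header *s_hdr;	/* driver-owned send-header state */
    };

    static inline struct my_qp_priv *to_priv(struct rvt_qp *qp)
    {
    	return qp->priv;		/* installed by the driver at QP creation */
    }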
......@@ -8,7 +8,7 @@
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
hfi1-y := chip.o cq.o device.o diag.o driver.o efivar.o eprom.o file_ops.o firmware.o \
init.o intr.o keys.o mad.o mmap.o mr.o pcie.o pio.o pio_copy.o \
init.o intr.o mad.o mmap.o pcie.o pio.o pio_copy.o \
qp.o qsfp.o rc.o ruc.o sdma.o srq.o sysfs.o trace.o twsi.o \
uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs_mcast.o verbs.o
hfi1-$(CONFIG_DEBUG_FS) += debugfs.o
......
......@@ -479,7 +479,7 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
if (cq->ip) {
struct hfi1_ibdev *dev = to_idev(ibcq->device);
struct hfi1_mmap_info *ip = cq->ip;
struct rvt_mmap_info *ip = cq->ip;
hfi1_update_mmap_info(dev, ip, sz, wc);
......
......@@ -1603,7 +1603,7 @@ int snoop_recv_handler(struct hfi1_packet *packet)
/*
* Handle snooping and capturing packets when sdma is being used.
*/
int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc)
{
pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n");
......@@ -1616,13 +1616,13 @@ int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
* bypass packets. The only way to send a bypass packet currently is to use the
* diagpkt interface. When that interface is enabled, snoop/capture is not.
*/
int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc)
{
struct hfi1_qp_priv *priv = qp->priv;
struct ahg_ib_header *ahdr = priv->s_hdr;
u32 hdrwords = qp->s_hdrwords;
struct hfi1_sge_state *ss = qp->s_cur_sge;
struct rvt_sge_state *ss = qp->s_cur_sge;
u32 len = qp->s_cur_size;
u32 dwords = (len + 3) >> 2;
u32 plen = hdrwords + dwords + 2; /* includes pbc */
......@@ -1630,7 +1630,7 @@ int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
struct snoop_packet *s_packet = NULL;
u32 *hdr = (u32 *)&ahdr->ibh;
u32 length = 0;
struct hfi1_sge_state temp_ss;
struct rvt_sge_state temp_ss;
void *data = NULL;
void *data_start = NULL;
int ret;
......
......@@ -318,7 +318,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
/* Get the destination QP number. */
qp_num = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
struct hfi1_qp *qp;
struct rvt_qp *qp;
unsigned long flags;
rcu_read_lock();
......@@ -387,7 +387,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
* Only in pre-B0 h/w is the CNP_OPCODE handled
* via this code path.
*/
struct hfi1_qp *qp = NULL;
struct rvt_qp *qp = NULL;
u32 lqpn, rqpn;
u16 rlid;
u8 svc_type, sl, sc5;
......@@ -456,7 +456,7 @@ static void prescan_rxq(struct hfi1_packet *packet) {}
#else /* !CONFIG_PRESCAN_RXQ */
static int prescan_receive_queue;
static void process_ecn(struct hfi1_qp *qp, struct hfi1_ib_header *hdr,
static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
struct hfi1_other_headers *ohdr,
u64 rhf, u32 bth1, struct ib_grh *grh)
{
......@@ -595,7 +595,7 @@ static void prescan_rxq(struct hfi1_packet *packet)
struct hfi1_ibport *ibp = &rcd->ppd->ibport_data;
__le32 *rhf_addr = (__le32 *) rcd->rcvhdrq + mdata.ps_head +
dd->rhf_offset;
struct hfi1_qp *qp;
struct rvt_qp *qp;
struct hfi1_ib_header *hdr;
struct hfi1_other_headers *ohdr;
struct ib_grh *grh = NULL;
......@@ -770,7 +770,7 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd;
struct hfi1_qp *qp, *nqp;
struct rvt_qp *qp, *nqp;
rcd = packet->rcd;
rcd->head = packet->rhqoff;
......
......@@ -334,7 +334,7 @@ struct hfi1_packet {
void *hdr;
struct hfi1_ctxtdata *rcd;
__le32 *rhf_addr;
struct hfi1_qp *qp;
struct rvt_qp *qp;
struct hfi1_other_headers *ohdr;
u64 rhf;
u32 maxcnt;
......@@ -374,7 +374,7 @@ struct hfi1_snoop_data {
#define HFI1_PORT_SNOOP_MODE 1U
#define HFI1_PORT_CAPTURE_MODE 2U
struct hfi1_sge_state;
struct rvt_sge_state;
/*
* Get/Set IB link-level config parameters for f_get/set_ib_cfg()
......@@ -1091,9 +1091,9 @@ struct hfi1_devdata {
* Handlers for outgoing data so that snoop/capture does not
* have to have its hooks in the send path
*/
int (*process_pio_send)(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
int (*process_pio_send)(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc);
int (*process_dma_send)(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
int (*process_dma_send)(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc);
void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
u64 pbc, const void *from, size_t count);
......@@ -1276,7 +1276,7 @@ static inline u32 egress_cycles(u32 len, u32 rate)
void set_link_ipg(struct hfi1_pportdata *ppd);
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
u32 rqpn, u8 svc_type);
void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn,
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
u32 pkey, u32 slid, u32 dlid, u8 sc5,
const struct ib_grh *old_grh);
......@@ -1468,9 +1468,9 @@ void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
int snoop_recv_handler(struct hfi1_packet *packet);
int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc);
int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc);
void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
u64 pbc, const void *from, size_t count);
......@@ -1682,7 +1682,7 @@ int process_receive_invalid(struct hfi1_packet *packet);
extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8];
void update_sge(struct hfi1_sge_state *ss, u32 length);
void update_sge(struct rvt_sge_state *ss, u32 length);
/* global module parameter variables */
extern unsigned int hfi1_max_mtu;
......
/*
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright(c) 2015 Intel Corporation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "hfi.h"
/**
* hfi1_alloc_lkey - allocate an lkey
* @mr: memory region that this lkey protects
* @dma_region: 0->normal key, 1->restricted DMA key
*
* Returns 0 if successful, otherwise returns -errno.
*
* Increments mr reference count as required.
*
* Sets the lkey field of mr for non-dma regions.
*
*/
int hfi1_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
unsigned long flags;
u32 r;
u32 n;
int ret = 0;
struct hfi1_ibdev *dev = to_idev(mr->pd->device);
struct rvt_lkey_table *rkt = &dev->lk_table;
hfi1_get_mr(mr);
spin_lock_irqsave(&rkt->lock, flags);
/* special case for dma_mr lkey == 0 */
if (dma_region) {
struct rvt_mregion *tmr;
tmr = rcu_access_pointer(dev->dma_mr);
if (!tmr) {
rcu_assign_pointer(dev->dma_mr, mr);
mr->lkey_published = 1;
} else {
hfi1_put_mr(mr);
}
goto success;
}
/* Find the next available LKEY */
r = rkt->next;
n = r;
for (;;) {
if (!rcu_access_pointer(rkt->table[r]))
break;
r = (r + 1) & (rkt->max - 1);
if (r == n)
goto bail;
}
rkt->next = (r + 1) & (rkt->max - 1);
/*
* Make sure lkey is never zero, which is reserved to indicate an
* unrestricted LKEY.
*/
rkt->gen++;
/*
* bits are capped in verbs.c to ensure enough bits for
* generation number
*/
mr->lkey = (r << (32 - hfi1_lkey_table_size)) |
((((1 << (24 - hfi1_lkey_table_size)) - 1) & rkt->gen)
<< 8);
if (mr->lkey == 0) {
mr->lkey |= 1 << 8;
rkt->gen++;
}
rcu_assign_pointer(rkt->table[r], mr);
mr->lkey_published = 1;
success:
spin_unlock_irqrestore(&rkt->lock, flags);
out:
return ret;
bail:
hfi1_put_mr(mr);
spin_unlock_irqrestore(&rkt->lock, flags);
ret = -ENOMEM;
goto out;
}
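To make the lkey packing above concrete, a worked example; the 16-bit table size is chosen purely for illustration:

    /*
     * Assume hfi1_lkey_table_size = 16 (illustrative value):
     *   - table index r occupies bits 31..16:  r << (32 - 16)
     *   - the generation is masked to (24 - 16) = 8 bits and occupies
     *     bits 15..8:  ((rkt->gen & 0xff) << 8)
     *   - bits 7..0 stay zero
     *
     * e.g. r = 0x0003, gen = 0x2a:
     *   lkey = (0x0003 << 16) | (0x2a << 8) = 0x00032a00
     *
     * If the packed value works out to 0, the code above sets bit 8 and
     * bumps the generation, so a published lkey never aliases the
     * reserved unrestricted LKEY of 0.
     */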
/**
* hfi1_free_lkey - free an lkey
* @mr: mr to free from tables
*/
void hfi1_free_lkey(struct rvt_mregion *mr)
{
unsigned long flags;
u32 lkey = mr->lkey;
u32 r;
struct hfi1_ibdev *dev = to_idev(mr->pd->device);
struct rvt_lkey_table *rkt = &dev->lk_table;
int freed = 0;
spin_lock_irqsave(&rkt->lock, flags);
if (!mr->lkey_published)
goto out;
if (lkey == 0)
RCU_INIT_POINTER(dev->dma_mr, NULL);
else {
r = lkey >> (32 - hfi1_lkey_table_size);
RCU_INIT_POINTER(rkt->table[r], NULL);
}
mr->lkey_published = 0;
freed++;
out:
spin_unlock_irqrestore(&rkt->lock, flags);
if (freed) {
synchronize_rcu();
hfi1_put_mr(mr);
}
}
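The free path above is the standard RCU retire sequence; condensed to the ordering that matters (a sketch of the code above, not a verbatim copy):

    /* RCU retire sequence condensed from hfi1_free_lkey(): */
    spin_lock_irqsave(&rkt->lock, flags);
    RCU_INIT_POINTER(rkt->table[r], NULL);	/* unpublish under the lock */
    spin_unlock_irqrestore(&rkt->lock, flags);

    synchronize_rcu();	/* wait out rcu_read_lock() readers that may
    			 * still be dereferencing the old entry */
    hfi1_put_mr(mr);	/* only now drop the table's reference */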
/**
* hfi1_lkey_ok - check IB SGE for validity and initialize
* @rkt: table containing lkey to check SGE against
* @pd: protection domain
* @isge: outgoing internal SGE
* @sge: SGE to check
* @acc: access flags
*
* Return 1 if valid and successful; otherwise return 0.
*
* increments the reference count upon success
*
* Check the IB SGE for validity and initialize our internal version
* of it.
*/
int hfi1_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
struct hfi1_sge *isge, struct ib_sge *sge, int acc)
{
struct rvt_mregion *mr;
unsigned n, m;
size_t off;
/*
* We use LKEY == zero for kernel virtual addresses
* (see hfi1_get_dma_mr and dma.c).
*/
rcu_read_lock();
if (sge->lkey == 0) {
struct hfi1_ibdev *dev = to_idev(pd->ibpd.device);
if (pd->user)
goto bail;
mr = rcu_dereference(dev->dma_mr);
if (!mr)
goto bail;
atomic_inc(&mr->refcount);
rcu_read_unlock();
isge->mr = mr;
isge->vaddr = (void *) sge->addr;
isge->length = sge->length;
isge->sge_length = sge->length;
isge->m = 0;
isge->n = 0;
goto ok;
}
mr = rcu_dereference(
rkt->table[(sge->lkey >> (32 - hfi1_lkey_table_size))]);
if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
goto bail;
off = sge->addr - mr->user_base;
if (unlikely(sge->addr < mr->user_base ||
off + sge->length > mr->length ||
(mr->access_flags & acc) != acc))
goto bail;
atomic_inc(&mr->refcount);
rcu_read_unlock();
off += mr->offset;
if (mr->page_shift) {
/*
page sizes are uniform power of 2 so no loop is necessary
entries_spanned_by_off is the number of times the loop below
would have executed.
*/
size_t entries_spanned_by_off;
entries_spanned_by_off = off >> mr->page_shift;
off -= (entries_spanned_by_off << mr->page_shift);
m = entries_spanned_by_off / RVT_SEGSZ;
n = entries_spanned_by_off % RVT_SEGSZ;
} else {
m = 0;
n = 0;
while (off >= mr->map[m]->segs[n].length) {
off -= mr->map[m]->segs[n].length;
n++;
if (n >= RVT_SEGSZ) {
m++;
n = 0;
}
}
}
isge->mr = mr;
isge->vaddr = mr->map[m]->segs[n].vaddr + off;
isge->length = mr->map[m]->segs[n].length - off;
isge->sge_length = sge->length;
isge->m = m;
isge->n = n;
ok:
return 1;
bail:
rcu_read_unlock();
return 0;
}
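A quick worked example of the page_shift fast path above: suppose the region uses 4 KiB pages (page_shift = 12), off is 0x6800 after adding mr->offset, and RVT_SEGSZ is 512 (an assumed value here; use the real rdmavt constant):

    /*
     * entries_spanned_by_off = 0x6800 >> 12 = 6	(six whole pages)
     * off -= 6 << 12				->  off = 0x800
     * m = 6 / 512 = 0,  n = 6 % 512 = 6
     *
     * The SGE starts 0x800 bytes into segment 6 of map 0 -- the same
     * answer the segment-walking while loop computes, but in O(1).
     */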
/**
* hfi1_rkey_ok - check the IB virtual address, length, and RKEY
* @qp: qp for validation
* @sge: SGE state
* @len: length of data
* @vaddr: virtual address to place data
* @rkey: rkey to check
* @acc: access flags
*
* Return 1 if successful, otherwise 0.
*
* increments the reference count upon success
*/
int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge,
u32 len, u64 vaddr, u32 rkey, int acc)
{
struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
struct rvt_mregion *mr;
unsigned n, m;
size_t off;
/*
* We use RKEY == zero for kernel virtual addresses
* (see hfi1_get_dma_mr and dma.c).
*/
rcu_read_lock();
if (rkey == 0) {
struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
struct hfi1_ibdev *dev = to_idev(pd->ibpd.device);
if (pd->user)
goto bail;
mr = rcu_dereference(dev->dma_mr);
if (!mr)
goto bail;
atomic_inc(&mr->refcount);
rcu_read_unlock();
sge->mr = mr;
sge->vaddr = (void *) vaddr;
sge->length = len;
sge->sge_length = len;
sge->m = 0;
sge->n = 0;
goto ok;
}
mr = rcu_dereference(
rkt->table[(rkey >> (32 - hfi1_lkey_table_size))]);
if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
goto bail;
off = vaddr - mr->iova;
if (unlikely(vaddr < mr->iova || off + len > mr->length ||
(mr->access_flags & acc) == 0))
goto bail;
atomic_inc(&mr->refcount);
rcu_read_unlock();
off += mr->offset;
if (mr->page_shift) {
/*
page sizes are uniform power of 2 so no loop is necessary
entries_spanned_by_off is the number of times the loop below
would have executed.
*/
size_t entries_spanned_by_off;
entries_spanned_by_off = off >> mr->page_shift;
off -= (entries_spanned_by_off << mr->page_shift);
m = entries_spanned_by_off / RVT_SEGSZ;
n = entries_spanned_by_off % RVT_SEGSZ;
} else {
m = 0;
n = 0;
while (off >= mr->map[m]->segs[n].length) {
off -= mr->map[m]->segs[n].length;
n++;
if (n >= RVT_SEGSZ) {
m++;
n = 0;
}
}
}
sge->mr = mr;
sge->vaddr = mr->map[m]->segs[n].vaddr + off;
sge->length = mr->map[m]->segs[n].length - off;
sge->sge_length = len;
sge->m = m;
sge->n = n;
ok:
return 1;
bail:
rcu_read_unlock();
return 0;
}
......@@ -59,12 +59,12 @@
/**
* hfi1_release_mmap_info - free mmap info structure
* @ref: a pointer to the kref within struct hfi1_mmap_info
* @ref: a pointer to the kref within struct rvt_mmap_info
*/
void hfi1_release_mmap_info(struct kref *ref)
{
struct hfi1_mmap_info *ip =
container_of(ref, struct hfi1_mmap_info, ref);
struct rvt_mmap_info *ip =
container_of(ref, struct rvt_mmap_info, ref);
struct hfi1_ibdev *dev = to_idev(ip->context->device);
spin_lock_irq(&dev->pending_lock);
......@@ -81,14 +81,14 @@ void hfi1_release_mmap_info(struct kref *ref)
*/
static void hfi1_vma_open(struct vm_area_struct *vma)
{
struct hfi1_mmap_info *ip = vma->vm_private_data;
struct rvt_mmap_info *ip = vma->vm_private_data;
kref_get(&ip->ref);
}
static void hfi1_vma_close(struct vm_area_struct *vma)
{
struct hfi1_mmap_info *ip = vma->vm_private_data;
struct rvt_mmap_info *ip = vma->vm_private_data;
kref_put(&ip->ref, hfi1_release_mmap_info);
}
......@@ -109,7 +109,7 @@ int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
struct hfi1_ibdev *dev = to_idev(context->device);
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long size = vma->vm_end - vma->vm_start;
struct hfi1_mmap_info *ip, *pp;
struct rvt_mmap_info *ip, *pp;
int ret = -EINVAL;
/*
......@@ -146,11 +146,11 @@ int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
/*
* Allocate information for hfi1_mmap
*/
struct hfi1_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev,
u32 size,
struct ib_ucontext *context,
void *obj)
{
struct hfi1_mmap_info *ip;
struct rvt_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev,
u32 size,
struct ib_ucontext *context,
void *obj)
{
struct rvt_mmap_info *ip;
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip)
......@@ -175,7 +175,7 @@ struct hfi1_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev,
return ip;
}
void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct hfi1_mmap_info *ip,
void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct rvt_mmap_info *ip,
u32 size, void *obj)
{
size = PAGE_ALIGN(size);
......
......@@ -1526,8 +1526,8 @@ static void sc_piobufavail(struct send_context *sc)
struct hfi1_devdata *dd = sc->dd;
struct hfi1_ibdev *dev = &dd->verbs_dev;
struct list_head *list;
struct hfi1_qp *qps[PIO_WAIT_BATCH_SIZE];
struct hfi1_qp *qp;
struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
struct rvt_qp *qp;
struct hfi1_qp_priv *priv;
unsigned long flags;
unsigned i, n = 0;
......
......@@ -80,7 +80,7 @@ struct hfi1_qpn_table {
struct hfi1_qp_ibdev {
u32 qp_table_size;
u32 qp_table_bits;
struct hfi1_qp __rcu **qp_table;
struct rvt_qp __rcu **qp_table;
spinlock_t qpt_lock;
struct hfi1_qpn_table qpn_table;
};
......@@ -98,10 +98,10 @@ static inline u32 qpn_hash(struct hfi1_qp_ibdev *dev, u32 qpn)
* The caller must hold the rcu_read_lock(), and keep the lock until
* the returned qp is no longer in use.
*/
static inline struct hfi1_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp,
u32 qpn) __must_hold(RCU)
static inline struct rvt_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp,
u32 qpn) __must_hold(RCU)
{
struct hfi1_qp *qp = NULL;
struct rvt_qp *qp = NULL;
if (unlikely(qpn <= 1)) {
qp = rcu_dereference(ibp->qp[qpn]);
......@@ -117,11 +117,10 @@ static inline struct hfi1_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp,
return qp;
}
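Because the lookup is RCU-protected (note the __must_hold(RCU) annotation), a caller must hold rcu_read_lock() across the lookup and every use of the returned qp. A hedged usage sketch, with a hypothetical helper standing in for the real packet processing:

    rcu_read_lock();
    qp = hfi1_lookup_qpn(ibp, qp_num);
    if (qp) {
    	/* qp may be freed once the read-side section ends */
    	handle_packet_for_qp(qp, packet);	/* hypothetical helper */
    }
    rcu_read_unlock();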
/**
* clear_ahg - reset ahg status in qp
* @qp - qp pointer
/*
* free_ahg - clear ahg from QP
*/
static inline void clear_ahg(struct hfi1_qp *qp)
static inline void clear_ahg(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
......@@ -142,7 +141,7 @@ static inline void clear_ahg(struct hfi1_qp *qp)
* The QP r_lock and s_lock should be held and interrupts disabled.
* If we are already in error state, just return.
*/
int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err);
int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
/**
* hfi1_modify_qp - modify the attributes of a queue pair
......@@ -165,7 +164,7 @@ int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
*
* Returns the AETH.
*/
__be32 hfi1_compute_aeth(struct hfi1_qp *qp);
__be32 hfi1_compute_aeth(struct rvt_qp *qp);
/**
* hfi1_create_qp - create a queue pair for a device
......@@ -198,7 +197,7 @@ int hfi1_destroy_qp(struct ib_qp *ibqp);
*
* The QP s_lock should be held.
*/
void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth);
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth);
/**
* hfi1_qp_init - allocate QP tables
......@@ -217,9 +216,9 @@ void hfi1_qp_exit(struct hfi1_ibdev *dev);
* @qp: the QP
* @flag: flag the qp on which the qp is stalled
*/
void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag);
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag);
struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5);
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5);
struct qp_iter;
......@@ -246,7 +245,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter);
* qp_comm_est - handle trap with QP established
* @qp: the QP
*/
void qp_comm_est(struct hfi1_qp *qp);
void qp_comm_est(struct rvt_qp *qp);
/**
* _hfi1_schedule_send - schedule progress
......@@ -257,7 +256,7 @@ void qp_comm_est(struct hfi1_qp *qp);
* It is only used in the post send, which doesn't hold
* the s_lock.
*/
static inline void _hfi1_schedule_send(struct hfi1_qp *qp)
static inline void _hfi1_schedule_send(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibport *ibp =
......@@ -278,12 +277,12 @@ static inline void _hfi1_schedule_send(struct hfi1_qp *qp)
* This schedules qp progress and caller should hold
* the s_lock.
*/
static inline void hfi1_schedule_send(struct hfi1_qp *qp)
static inline void hfi1_schedule_send(struct rvt_qp *qp)
{
if (hfi1_send_ok(qp))
_hfi1_schedule_send(qp);
}
void hfi1_migrate_qp(struct hfi1_qp *qp);
void hfi1_migrate_qp(struct rvt_qp *qp);
#endif /* _QP_H */
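The two schedule helpers above differ only in their locking contract: hfi1_schedule_send() expects the caller to hold qp->s_lock so its hfi1_send_ok() check is stable, while _hfi1_schedule_send() serves the post-send path, which does not hold the lock. A hedged usage sketch:

    /* Progress from a context that already holds s_lock: */
    spin_lock_irqsave(&qp->s_lock, flags);
    hfi1_schedule_send(qp);		/* gated on hfi1_send_ok(qp) */
    spin_unlock_irqrestore(&qp->s_lock, flags);

    /* From post send, where s_lock is not held: */
    _hfi1_schedule_send(qp);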
......@@ -97,15 +97,15 @@ const u32 ib_hfi1_rnr_table[32] = {
* Validate a RWQE and fill in the SGE state.
* Return 1 if OK.
*/
static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe)
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
int i, j, ret;
struct ib_wc wc;
struct rvt_lkey_table *rkt;
struct rvt_pd *pd;
struct hfi1_sge_state *ss;
struct rvt_sge_state *ss;
rkt = &to_idev(qp->ibqp.device)->lk_table;
rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
ss = &qp->r_sge;
ss->sg_list = qp->r_sg_list;
......@@ -114,8 +114,8 @@ static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe)
if (wqe->sg_list[i].length == 0)
continue;
/* Check LKEY */
if (!hfi1_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
&wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
&wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
goto bad_lkey;
qp->r_len += wqe->sg_list[i].length;
j++;
......@@ -127,9 +127,9 @@ static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe)
bad_lkey:
while (j) {
struct hfi1_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
hfi1_put_mr(sge->mr);
rvt_put_mr(sge->mr);
}
ss->num_sge = 0;
memset(&wc, 0, sizeof(wc));
......@@ -154,13 +154,13 @@ static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe)
*
* Can be called from interrupt level.
*/
int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only)
int hfi1_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
unsigned long flags;
struct hfi1_rq *rq;
struct hfi1_rwq *wq;
struct rvt_rq *rq;
struct rvt_rwq *wq;
struct hfi1_srq *srq;
struct hfi1_rwqe *wqe;
struct rvt_rwqe *wqe;
void (*handler)(struct ib_event *, void *);
u32 tail;
int ret;
......@@ -265,7 +265,7 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
* The s_lock will be acquired around the hfi1_migrate_qp() call.
*/
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
int has_grh, struct hfi1_qp *qp, u32 bth0)
int has_grh, struct rvt_qp *qp, u32 bth0)
{
__be64 guid;
unsigned long flags;
......@@ -355,12 +355,12 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
* receive interrupts since this is a connected protocol and all packets
* will pass through here.
*/
static void ruc_loopback(struct hfi1_qp *sqp)
static void ruc_loopback(struct rvt_qp *sqp)
{
struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
struct hfi1_qp *qp;
struct hfi1_swqe *wqe;
struct hfi1_sge *sge;
struct rvt_qp *qp;
struct rvt_swqe *wqe;
struct rvt_sge *sge;
unsigned long flags;
struct ib_wc wc;
u64 sdata;
......@@ -461,11 +461,10 @@ static void ruc_loopback(struct hfi1_qp *sqp)
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
goto inv_err;
if (wqe->length == 0)
break;
if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
wqe->rdma_wr.remote_addr,
wqe->rdma_wr.rkey,
IB_ACCESS_REMOTE_WRITE)))
if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
wqe->rdma_wr.remote_addr,
wqe->rdma_wr.rkey,
IB_ACCESS_REMOTE_WRITE)))
goto acc_err;
qp->r_sge.sg_list = NULL;
qp->r_sge.num_sge = 1;
......@@ -475,10 +474,10 @@ static void ruc_loopback(struct hfi1_qp *sqp)
case IB_WR_RDMA_READ:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
goto inv_err;
if (unlikely(!hfi1_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
wqe->rdma_wr.remote_addr,
wqe->rdma_wr.rkey,
IB_ACCESS_REMOTE_READ)))
if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
wqe->rdma_wr.remote_addr,
wqe->rdma_wr.rkey,
IB_ACCESS_REMOTE_READ)))
goto acc_err;
release = 0;
sqp->s_sge.sg_list = NULL;
......@@ -493,10 +492,10 @@ static void ruc_loopback(struct hfi1_qp *sqp)
case IB_WR_ATOMIC_FETCH_AND_ADD:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
goto inv_err;
if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
wqe->atomic_wr.remote_addr,
wqe->atomic_wr.rkey,
IB_ACCESS_REMOTE_ATOMIC)))
if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
wqe->atomic_wr.remote_addr,
wqe->atomic_wr.rkey,
IB_ACCESS_REMOTE_ATOMIC)))
goto acc_err;
/* Perform atomic OP and save result. */
maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
......@@ -506,7 +505,7 @@ static void ruc_loopback(struct hfi1_qp *sqp)
(u64) atomic64_add_return(sdata, maddr) - sdata :
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
sdata, wqe->atomic_wr.swap);
hfi1_put_mr(qp->r_sge.sge.mr);
rvt_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
goto send_comp;
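The conditional expression above handles both IB atomics in one statement; unrolled for readability (a sketch, with the destination of the result and the surrounding locking elided):

    u64 old;	/* illustrative name for the value returned to the requester */

    if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
    	/* atomic64_add_return() yields the post-add value, so subtract
    	 * sdata to recover the pre-add value IB semantics require */
    	old = (u64)atomic64_add_return(sdata, maddr) - sdata;
    else	/* IB_WR_ATOMIC_CMP_AND_SWP */
    	old = (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
    			   sdata, wqe->atomic_wr.swap);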
......@@ -530,7 +529,7 @@ static void ruc_loopback(struct hfi1_qp *sqp)
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (!release)
hfi1_put_mr(sge->mr);
rvt_put_mr(sge->mr);
if (--sqp->s_sge.num_sge)
*sge = *sqp->s_sge.sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
......@@ -690,7 +689,7 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
* Subsequent middles use the copied entry, editing the
* PSN with 1 or 2 edits.
*/
static inline void build_ahg(struct hfi1_qp *qp, u32 npsn)
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
struct hfi1_qp_priv *priv = qp->priv;
if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
......@@ -734,7 +733,7 @@ static inline void build_ahg(struct hfi1_qp *qp, u32 npsn)
}
}
void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
u32 bth0, u32 bth2, int middle)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
......@@ -812,9 +811,9 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
void hfi1_do_send(struct work_struct *work)
{
struct iowait *wait = container_of(work, struct iowait, iowork);
struct hfi1_qp *qp = iowait_to_qp(wait);
struct rvt_qp *qp = iowait_to_qp(wait);
struct hfi1_pkt_state ps;
int (*make_req)(struct hfi1_qp *qp);
int (*make_req)(struct rvt_qp *qp);
unsigned long flags;
unsigned long timeout;
......@@ -876,7 +875,7 @@ void hfi1_do_send(struct work_struct *work)
/*
* This should be called with s_lock held.
*/
void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe,
void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status)
{
u32 old_last, last;
......@@ -886,9 +885,9 @@ void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe,
return;
for (i = 0; i < wqe->wr.num_sge; i++) {
struct hfi1_sge *sge = &wqe->sg_list[i];
struct rvt_sge *sge = &wqe->sg_list[i];
hfi1_put_mr(sge->mr);
rvt_put_mr(sge->mr);
}
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
......
......@@ -379,10 +379,10 @@ struct sdma_txreq {
struct verbs_txreq {
struct hfi1_pio_header phdr;
struct sdma_txreq txreq;
struct hfi1_qp *qp;
struct hfi1_swqe *wqe;
struct rvt_qp *qp;
struct rvt_swqe *wqe;
struct rvt_mregion *mr;
struct hfi1_sge_state *ss;
struct rvt_sge_state *ss;
struct sdma_engine *sde;
u16 hdr_dwords;
u16 hdr_inx;
......
......@@ -66,12 +66,12 @@ int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
{
struct hfi1_srq *srq = to_isrq(ibsrq);
struct hfi1_rwq *wq;
struct rvt_rwq *wq;
unsigned long flags;
int ret;
for (; wr; wr = wr->next) {
struct hfi1_rwqe *wqe;
struct rvt_rwqe *wqe;
u32 next;
int i;
......@@ -149,8 +149,8 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
srq->rq.size = srq_init_attr->attr.max_wr + 1;
srq->rq.max_sge = srq_init_attr->attr.max_sge;
sz = sizeof(struct ib_sge) * srq->rq.max_sge +
sizeof(struct hfi1_rwqe);
srq->rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) + srq->rq.size * sz);
sizeof(struct rvt_rwqe);
srq->rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz);
if (!srq->rq.wq) {
ret = ERR_PTR(-ENOMEM);
goto bail_srq;
......@@ -162,7 +162,7 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
*/
if (udata && udata->outlen >= sizeof(__u64)) {
int err;
u32 s = sizeof(struct hfi1_rwq) + srq->rq.size * sz;
u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
srq->ip =
hfi1_create_mmap_info(dev, s, ibpd->uobject->context,
......@@ -230,12 +230,12 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
struct ib_udata *udata)
{
struct hfi1_srq *srq = to_isrq(ibsrq);
struct hfi1_rwq *wq;
struct rvt_rwq *wq;
int ret = 0;
if (attr_mask & IB_SRQ_MAX_WR) {
struct hfi1_rwq *owq;
struct hfi1_rwqe *p;
struct rvt_rwq *owq;
struct rvt_rwqe *p;
u32 sz, size, n, head, tail;
/* Check that the requested sizes are below the limits. */
......@@ -246,10 +246,10 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
goto bail;
}
sz = sizeof(struct hfi1_rwqe) +
sz = sizeof(struct rvt_rwqe) +
srq->rq.max_sge * sizeof(struct ib_sge);
size = attr->max_wr + 1;
wq = vmalloc_user(sizeof(struct hfi1_rwq) + size * sz);
wq = vmalloc_user(sizeof(struct rvt_rwq) + size * sz);
if (!wq) {
ret = -ENOMEM;
goto bail;
......@@ -296,7 +296,7 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
n = 0;
p = wq->wq;
while (tail != head) {
struct hfi1_rwqe *wqe;
struct rvt_rwqe *wqe;
int i;
wqe = get_rwqe_ptr(&srq->rq, tail);
......@@ -305,7 +305,7 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
for (i = 0; i < wqe->num_sge; i++)
p->sg_list[i] = wqe->sg_list[i];
n++;
p = (struct hfi1_rwqe *)((char *)p + sz);
p = (struct rvt_rwqe *)((char *)p + sz);
if (++tail >= srq->rq.size)
tail = 0;
}
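The copy loop above walks a flat array of variable-size work requests: struct rvt_rwqe ends in a flexible sg_list[], so consecutive entries sit sz bytes apart and must be advanced with byte arithmetic while tail wraps the ring. A worked sizing example (a max_sge of 4 is illustrative):

    /*
     * sz = sizeof(struct rvt_rwqe) + 4 * sizeof(struct ib_sge)
     * wqe i lives at (char *)wq->wq + i * sz, hence the stride:
     *   p = (struct rvt_rwqe *)((char *)p + sz);
     * and ++tail wraps to 0 at srq->rq.size, treating rq as a ring.
     */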
......@@ -320,9 +320,9 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
vfree(owq);
if (srq->ip) {
struct hfi1_mmap_info *ip = srq->ip;
struct rvt_mmap_info *ip = srq->ip;
struct hfi1_ibdev *dev = to_idev(srq->ibsrq.device);
u32 s = sizeof(struct hfi1_rwq) + size * sz;
u32 s = sizeof(struct rvt_rwq) + size * sz;
hfi1_update_mmap_info(dev, ip, s, wq);
......
......@@ -332,7 +332,7 @@ TRACE_EVENT(hfi1_wantpiointr,
);
DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
TP_PROTO(struct hfi1_qp *qp, u32 flags),
TP_PROTO(struct rvt_qp *qp, u32 flags),
TP_ARGS(qp, flags),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
......@@ -356,17 +356,17 @@ DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
);
DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
TP_PROTO(struct hfi1_qp *qp, u32 flags),
TP_PROTO(struct rvt_qp *qp, u32 flags),
TP_ARGS(qp, flags));
DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
TP_PROTO(struct hfi1_qp *qp, u32 flags),
TP_PROTO(struct rvt_qp *qp, u32 flags),
TP_ARGS(qp, flags));
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_qphash
DECLARE_EVENT_CLASS(hfi1_qphash_template,
TP_PROTO(struct hfi1_qp *qp, u32 bucket),
TP_PROTO(struct rvt_qp *qp, u32 bucket),
TP_ARGS(qp, bucket),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
......@@ -387,11 +387,11 @@ DECLARE_EVENT_CLASS(hfi1_qphash_template,
);
DEFINE_EVENT(hfi1_qphash_template, hfi1_qpinsert,
TP_PROTO(struct hfi1_qp *qp, u32 bucket),
TP_PROTO(struct rvt_qp *qp, u32 bucket),
TP_ARGS(qp, bucket));
DEFINE_EVENT(hfi1_qphash_template, hfi1_qpremove,
TP_PROTO(struct hfi1_qp *qp, u32 bucket),
TP_PROTO(struct rvt_qp *qp, u32 bucket),
TP_ARGS(qp, bucket));
#undef TRACE_SYSTEM
......@@ -1292,7 +1292,7 @@ TRACE_EVENT(hfi1_sdma_state,
#define TRACE_SYSTEM hfi1_rc
DECLARE_EVENT_CLASS(hfi1_rc_template,
TP_PROTO(struct hfi1_qp *qp, u32 psn),
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
......@@ -1331,22 +1331,22 @@ DECLARE_EVENT_CLASS(hfi1_rc_template,
);
DEFINE_EVENT(hfi1_rc_template, hfi1_rc_sendcomplete,
TP_PROTO(struct hfi1_qp *qp, u32 psn),
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(hfi1_rc_template, hfi1_rc_ack,
TP_PROTO(struct hfi1_qp *qp, u32 psn),
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(hfi1_rc_template, hfi1_rc_timeout,
TP_PROTO(struct hfi1_qp *qp, u32 psn),
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(hfi1_rc_template, hfi1_rc_rcv_error,
TP_PROTO(struct hfi1_qp *qp, u32 psn),
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
......
......@@ -61,11 +61,11 @@
*
* Return 1 if constructed; otherwise, return 0.
*/
int hfi1_make_uc_req(struct hfi1_qp *qp)
int hfi1_make_uc_req(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_other_headers *ohdr;
struct hfi1_swqe *wqe;
struct rvt_swqe *wqe;
unsigned long flags;
u32 hwords = 5;
u32 bth0 = 0;
......@@ -267,7 +267,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
u32 rcv_flags = packet->rcv_flags;
void *data = packet->ebuf;
u32 tlen = packet->tlen;
struct hfi1_qp *qp = packet->qp;
struct rvt_qp *qp = packet->qp;
struct hfi1_other_headers *ohdr = packet->ohdr;
u32 bth0, opcode;
u32 hdrsize = packet->hlen;
......@@ -492,8 +492,8 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
int ok;
/* Check rkey */
ok = hfi1_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
if (unlikely(!ok))
goto drop;
qp->r_sge.num_sge = 1;
......
......@@ -65,15 +65,15 @@
* Note that the receive interrupt handler may be calling hfi1_ud_rcv()
* while this is being called.
*/
static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
struct hfi1_pportdata *ppd;
struct hfi1_qp *qp;
struct rvt_qp *qp;
struct ib_ah_attr *ah_attr;
unsigned long flags;
struct hfi1_sge_state ssge;
struct hfi1_sge *sge;
struct rvt_sge_state ssge;
struct rvt_sge *sge;
struct ib_wc wc;
u32 length;
enum ib_qp_type sqptype, dqptype;
......@@ -262,14 +262,14 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
*
* Return 1 if constructed; otherwise, return 0.
*/
int hfi1_make_ud_req(struct hfi1_qp *qp)
int hfi1_make_ud_req(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_other_headers *ohdr;
struct ib_ah_attr *ah_attr;
struct hfi1_pportdata *ppd;
struct hfi1_ibport *ibp;
struct hfi1_swqe *wqe;
struct rvt_swqe *wqe;
unsigned long flags;
u32 nwords;
u32 extra_bytes;
......@@ -477,7 +477,7 @@ int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
return -1;
}
void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn,
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
u32 pkey, u32 slid, u32 dlid, u8 sc5,
const struct ib_grh *old_grh)
{
......@@ -551,7 +551,7 @@ void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn,
* opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
*/
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
struct hfi1_qp *qp, u16 slid, struct opa_smp *smp)
struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
{
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
......@@ -655,7 +655,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
u32 rcv_flags = packet->rcv_flags;
void *data = packet->ebuf;
u32 tlen = packet->tlen;
struct hfi1_qp *qp = packet->qp;
struct rvt_qp *qp = packet->qp;
bool has_grh = rcv_flags & HFI1_HAS_GRH;
bool sc4_bit = has_sc4_bit(packet);
u8 sc;
......
......@@ -56,7 +56,7 @@
* mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
* @qp: the QP to link
*/
static struct hfi1_mcast_qp *mcast_qp_alloc(struct hfi1_qp *qp)
static struct hfi1_mcast_qp *mcast_qp_alloc(struct rvt_qp *qp)
{
struct hfi1_mcast_qp *mqp;
......@@ -73,7 +73,7 @@ static struct hfi1_mcast_qp *mcast_qp_alloc(struct hfi1_qp *qp)
static void mcast_qp_free(struct hfi1_mcast_qp *mqp)
{
struct hfi1_qp *qp = mqp->qp;
struct rvt_qp *qp = mqp->qp;
/* Notify hfi1_destroy_qp() if it is waiting. */
if (atomic_dec_and_test(&qp->refcount))
......@@ -241,7 +241,7 @@ static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp,
int hfi1_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct hfi1_qp *qp = to_iqp(ibqp);
struct rvt_qp *qp = to_iqp(ibqp);
struct hfi1_ibdev *dev = to_idev(ibqp->device);
struct hfi1_ibport *ibp;
struct hfi1_mcast *mcast;
......@@ -299,7 +299,7 @@ int hfi1_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct hfi1_qp *qp = to_iqp(ibqp);
struct rvt_qp *qp = to_iqp(ibqp);
struct hfi1_ibdev *dev = to_idev(ibqp->device);
struct hfi1_ibport *ibp = to_iport(ibqp->device, qp->port_num);
struct hfi1_mcast *mcast = NULL;
......