Commit b038ced7 authored by Steve Wise, committed by Roland Dreier

RDMA/cxgb3: Add driver for Chelsio T3 RNIC

Add an RDMA/iWARP driver for the Chelsio T3 1GbE and 10GbE adapters.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent c7f743a6
@@ -38,6 +38,7 @@ source "drivers/infiniband/hw/mthca/Kconfig"
 source "drivers/infiniband/hw/ipath/Kconfig"
 source "drivers/infiniband/hw/ehca/Kconfig"
 source "drivers/infiniband/hw/amso1100/Kconfig"
+source "drivers/infiniband/hw/cxgb3/Kconfig"
 source "drivers/infiniband/ulp/ipoib/Kconfig"
@@ -3,6 +3,7 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
 obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
 obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
 obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
+obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
 obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
 obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
config INFINIBAND_CXGB3
tristate "Chelsio RDMA Driver"
depends on CHELSIO_T3 && INFINIBAND && INET
select GENERIC_ALLOCATOR
---help---
This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
10GbE adapters.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
For customer support, please visit our customer support page at
<http://www.chelsio.com/support.htm>.
Please send feedback to <linux-bugs@chelsio.com>.
To compile this driver as a module, choose M here: the module
will be called iw_cxgb3.
config INFINIBAND_CXGB3_DEBUG
bool "Verbose debugging output"
depends on INFINIBAND_CXGB3
default n
---help---
This option causes the Chelsio RDMA driver to produce copious
amounts of debug messages. Select this if you are developing
the driver or trying to diagnose a problem.
EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3 \
-I$(TOPDIR)/drivers/infiniband/hw/cxgb3/core
obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
iwch_provider.o iwch.o cxio_hal.o cxio_resource.o
ifdef CONFIG_INFINIBAND_CXGB3_DEBUG
EXTRA_CFLAGS += -DDEBUG
iw_cxgb3-y += cxio_dbg.o
endif
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef DEBUG
#include <linux/types.h>
#include "common.h"
#include "cxgb3_ioctl.h"
#include "cxio_hal.h"
#include "cxio_wr.h"
void cxio_dump_tpt(struct cxio_rdev *rdev, u32 stag)
{
struct ch_mem_range *m;
u64 *data;
int rc;
int size = 32;
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
if (!m) {
PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
return;
}
m->mem_id = MEM_PMRX;
m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
m->len = size;
PDBG("%s TPT addr 0x%x len %d\n", __FUNCTION__, m->addr, m->len);
rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
if (rc) {
PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
kfree(m);
return;
}
data = (u64 *)m->buf;
while (size > 0) {
PDBG("TPT %08x: %016llx\n", m->addr, (unsigned long long) *data);
size -= 8;
data++;
m->addr += 8;
}
kfree(m);
}
void cxio_dump_pbl(struct cxio_rdev *rdev, u32 pbl_addr, uint len, u8 shift)
{
struct ch_mem_range *m;
u64 *data;
int rc;
int size, npages;
shift += 12;
npages = (len + (1ULL << shift) - 1) >> shift;
size = npages * sizeof(u64);
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
if (!m) {
PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
return;
}
m->mem_id = MEM_PMRX;
m->addr = pbl_addr;
m->len = size;
PDBG("%s PBL addr 0x%x len %d depth %d\n",
__FUNCTION__, m->addr, m->len, npages);
rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
if (rc) {
PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
kfree(m);
return;
}
data = (u64 *)m->buf;
while (size > 0) {
PDBG("PBL %08x: %016llx\n", m->addr, (unsigned long long) *data);
size -= 8;
data++;
m->addr += 8;
}
kfree(m);
}
void cxio_dump_wqe(union t3_wr *wqe)
{
__be64 *data = (__be64 *)wqe;
uint size = (uint)(be64_to_cpu(*data) & 0xff);
if (size == 0)
size = 8;
while (size > 0) {
PDBG("WQE %p: %016llx\n", data,
(unsigned long long) be64_to_cpu(*data));
size--;
data++;
}
}
void cxio_dump_wce(struct t3_cqe *wce)
{
__be64 *data = (__be64 *)wce;
int size = sizeof(*wce);
while (size > 0) {
PDBG("WCE %p: %016llx\n", data,
(unsigned long long) be64_to_cpu(*data));
size -= 8;
data++;
}
}
void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents)
{
struct ch_mem_range *m;
int size = nents * 64;
u64 *data;
int rc;
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
if (!m) {
PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
return;
}
m->mem_id = MEM_PMRX;
m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
m->len = size;
PDBG("%s RQT addr 0x%x len %d\n", __FUNCTION__, m->addr, m->len);
rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
if (rc) {
PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
kfree(m);
return;
}
data = (u64 *)m->buf;
while (size > 0) {
PDBG("RQT %08x: %016llx\n", m->addr, (unsigned long long) *data);
size -= 8;
data++;
m->addr += 8;
}
kfree(m);
}
void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid)
{
struct ch_mem_range *m;
int size = TCB_SIZE;
u32 *data;
int rc;
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
if (!m) {
PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
return;
}
m->mem_id = MEM_CM;
m->addr = hwtid * size;
m->len = size;
PDBG("%s TCB %d len %d\n", __FUNCTION__, m->addr, m->len);
rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
if (rc) {
PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
kfree(m);
return;
}
data = (u32 *)m->buf;
while (size > 0) {
printk("%2u: %08x %08x %08x %08x %08x %08x %08x %08x\n",
m->addr,
*(data+2), *(data+3), *(data),*(data+1),
*(data+6), *(data+7), *(data+4), *(data+5));
size -= 32;
data += 8;
m->addr += 32;
}
kfree(m);
}
#endif
[diff collapsed]
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CXIO_HAL_H__
#define __CXIO_HAL_H__
#include <linux/list.h>
#include <linux/mutex.h>
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_ctl_defs.h"
#include "cxio_wr.h"
#define T3_CTRL_QP_ID FW_RI_SGEEC_START
#define T3_CTL_QP_TID FW_RI_TID_START
#define T3_CTRL_QP_SIZE_LOG2 8
#define T3_CTRL_CQ_ID 0
/* TBD */
#define T3_MAX_NUM_RI (1<<15)
#define T3_MAX_NUM_QP (1<<15)
#define T3_MAX_NUM_CQ (1<<15)
#define T3_MAX_NUM_PD (1<<15)
#define T3_MAX_PBL_SIZE 256
#define T3_MAX_RQ_SIZE 1024
#define T3_MAX_NUM_STAG (1<<15)
#define T3_STAG_UNSET 0xffffffff
#define T3_MAX_DEV_NAME_LEN 32
struct cxio_hal_ctrl_qp {
u32 wptr;
u32 rptr;
struct mutex lock; /* for the wptr, can sleep */
wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
union t3_wr *workq; /* the work request queue */
dma_addr_t dma_addr; /* pci bus address of the workq */
DECLARE_PCI_UNMAP_ADDR(mapping)
void __iomem *doorbell;
};
struct cxio_hal_resource {
struct kfifo *tpt_fifo;
spinlock_t tpt_fifo_lock;
struct kfifo *qpid_fifo;
spinlock_t qpid_fifo_lock;
struct kfifo *cqid_fifo;
spinlock_t cqid_fifo_lock;
struct kfifo *pdid_fifo;
spinlock_t pdid_fifo_lock;
};
struct cxio_qpid_list {
struct list_head entry;
u32 qpid;
};
struct cxio_ucontext {
struct list_head qpids;
struct mutex lock;
};
struct cxio_rdev {
char dev_name[T3_MAX_DEV_NAME_LEN];
struct t3cdev *t3cdev_p;
struct rdma_info rnic_info;
struct adap_ports port_info;
struct cxio_hal_resource *rscp;
struct cxio_hal_ctrl_qp ctrl_qp;
void *ulp;
unsigned long qpshift;
u32 qpnr;
u32 qpmask;
struct cxio_ucontext uctx;
struct gen_pool *pbl_pool;
struct gen_pool *rqt_pool;
struct list_head entry;
};
static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
{
return min((int)T3_MAX_NUM_STAG,
	   (int)((rdev_p->rnic_info.tpt_top -
		  rdev_p->rnic_info.tpt_base) >> 5));
}
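/*
 * Each TPT entry is 32 bytes (cf. the stag -> address arithmetic in
 * cxio_dbg.c), so the usable STag count is the TPT window size >> 5,
 * capped at T3_MAX_NUM_STAG.
 */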
typedef void (*cxio_hal_ev_callback_func_t) (struct cxio_rdev * rdev_p,
struct sk_buff * skb);
#define RSPQ_CQID(rsp) (be32_to_cpu(rsp->cq_ptrid) & 0xffff)
#define RSPQ_CQPTR(rsp) ((be32_to_cpu(rsp->cq_ptrid) >> 16) & 0xffff)
#define RSPQ_GENBIT(rsp) ((be32_to_cpu(rsp->flags) >> 16) & 1)
#define RSPQ_OVERFLOW(rsp) ((be32_to_cpu(rsp->flags) >> 17) & 1)
#define RSPQ_AN(rsp) ((be32_to_cpu(rsp->flags) >> 18) & 1)
#define RSPQ_SE(rsp) ((be32_to_cpu(rsp->flags) >> 19) & 1)
#define RSPQ_NOTIFY(rsp) ((be32_to_cpu(rsp->flags) >> 20) & 1)
#define RSPQ_CQBRANCH(rsp) ((be32_to_cpu(rsp->flags) >> 21) & 1)
#define RSPQ_CREDIT_THRESH(rsp) ((be32_to_cpu(rsp->flags) >> 22) & 1)
struct respQ_msg_t {
__be32 flags; /* flit 0 */
__be32 cq_ptrid;
__be64 rsvd; /* flit 1 */
struct t3_cqe cqe; /* flits 2-3 */
};
enum t3_cq_opcode {
CQ_ARM_AN = 0x2,
CQ_ARM_SE = 0x6,
CQ_FORCE_AN = 0x3,
CQ_CREDIT_UPDATE = 0x7
};
int cxio_rdev_open(struct cxio_rdev *rdev);
void cxio_rdev_close(struct cxio_rdev *rdev);
int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
enum t3_cq_opcode op, u32 credit);
int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev, u32 qpid);
int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
void cxio_init_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
struct cxio_ucontext *uctx);
int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
struct cxio_ucontext *uctx);
int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
int cxio_allocate_stag(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
enum tpt_mem_perm perm, u32 * pbl_size, u32 * pbl_addr);
int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
u8 page_size, __be64 *pbl, u32 *pbl_size,
u32 *pbl_addr);
int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
u8 page_size, __be64 *pbl, u32 *pbl_size,
u32 *pbl_addr);
int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
u32 pbl_addr);
int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
int cxio_deallocate_window(struct cxio_rdev *rdev, u32 stag);
int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
u32 cxio_hal_get_rhdl(void);
void cxio_hal_put_rhdl(u32 rhdl);
u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
int __init cxio_hal_init(void);
void __exit cxio_hal_exit(void);
void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
void cxio_flush_hw_cq(struct t3_cq *cq);
int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
u8 *cqe_flushed, u64 *cookie, u32 *credit);
#define MOD "iw_cxgb3: "
#define PDBG(fmt, args...) pr_debug(MOD fmt, ## args)
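/*
 * PDBG() expands to pr_debug(), which compiles to nothing unless DEBUG
 * is defined; the Makefile adds -DDEBUG only under
 * CONFIG_INFINIBAND_CXGB3_DEBUG, so these calls are free in production
 * builds.
 */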
#ifdef DEBUG
void cxio_dump_tpt(struct cxio_rdev *rdev, u32 stag);
void cxio_dump_pbl(struct cxio_rdev *rdev, u32 pbl_addr, uint len, u8 shift);
void cxio_dump_wqe(union t3_wr *wqe);
void cxio_dump_wce(struct t3_cqe *wce);
void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents);
void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid);
#endif
#endif
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Crude resource management */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include "cxio_resource.h"
#include "cxio_hal.h"
static struct kfifo *rhdl_fifo;
static spinlock_t rhdl_fifo_lock;
#define RANDOM_SIZE 16
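/*
 * __cxio_init_resource_fifo() preloads a FIFO with the usable IDs in
 * [skip_low, nr - skip_high).  When random is set, the IDs are pushed
 * through a 16-slot reservoir refreshed from random32(), i.e. in a
 * loosely shuffled order -- presumably so that IDs such as STags are
 * handed out in a hard-to-predict sequence.
 */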
static int __cxio_init_resource_fifo(struct kfifo **fifo,
spinlock_t *fifo_lock,
u32 nr, u32 skip_low,
u32 skip_high,
int random)
{
u32 i, j, entry = 0, idx;
u32 random_bytes;
u32 rarray[16];
spin_lock_init(fifo_lock);
*fifo = kfifo_alloc(nr * sizeof(u32), GFP_KERNEL, fifo_lock);
if (IS_ERR(*fifo))
return -ENOMEM;
for (i = 0; i < skip_low + skip_high; i++)
__kfifo_put(*fifo, (unsigned char *) &entry, sizeof(u32));
if (random) {
j = 0;
random_bytes = random32();
for (i = 0; i < RANDOM_SIZE; i++)
rarray[i] = i + skip_low;
for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
if (j >= RANDOM_SIZE) {
j = 0;
random_bytes = random32();
}
idx = (random_bytes >> (j * 2)) & 0xF;
__kfifo_put(*fifo,
(unsigned char *) &rarray[idx],
sizeof(u32));
rarray[idx] = i;
j++;
}
for (i = 0; i < RANDOM_SIZE; i++)
__kfifo_put(*fifo,
(unsigned char *) &rarray[i],
sizeof(u32));
} else
for (i = skip_low; i < nr - skip_high; i++)
__kfifo_put(*fifo, (unsigned char *) &i, sizeof(u32));
for (i = 0; i < skip_low + skip_high; i++)
kfifo_get(*fifo, (unsigned char *) &entry, sizeof(u32));
return 0;
}
static int cxio_init_resource_fifo(struct kfifo **fifo, spinlock_t * fifo_lock,
u32 nr, u32 skip_low, u32 skip_high)
{
return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
skip_high, 0));
}
static int cxio_init_resource_fifo_random(struct kfifo **fifo,
spinlock_t * fifo_lock,
u32 nr, u32 skip_low, u32 skip_high)
{
return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
skip_high, 1));
}
static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p)
{
u32 i;
spin_lock_init(&rdev_p->rscp->qpid_fifo_lock);
rdev_p->rscp->qpid_fifo = kfifo_alloc(T3_MAX_NUM_QP * sizeof(u32),
GFP_KERNEL,
&rdev_p->rscp->qpid_fifo_lock);
if (IS_ERR(rdev_p->rscp->qpid_fifo))
return -ENOMEM;
for (i = 16; i < T3_MAX_NUM_QP; i++)
if (!(i & rdev_p->qpmask))
__kfifo_put(rdev_p->rscp->qpid_fifo,
(unsigned char *) &i, sizeof(u32));
return 0;
}
int cxio_hal_init_rhdl_resource(u32 nr_rhdl)
{
return cxio_init_resource_fifo(&rhdl_fifo, &rhdl_fifo_lock, nr_rhdl, 1,
0);
}
void cxio_hal_destroy_rhdl_resource(void)
{
kfifo_free(rhdl_fifo);
}
/* nr_* must be power of 2 */
int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
u32 nr_tpt, u32 nr_pbl,
u32 nr_rqt, u32 nr_qpid, u32 nr_cqid, u32 nr_pdid)
{
int err = 0;
struct cxio_hal_resource *rscp;
rscp = kmalloc(sizeof(*rscp), GFP_KERNEL);
if (!rscp)
return -ENOMEM;
rdev_p->rscp = rscp;
err = cxio_init_resource_fifo_random(&rscp->tpt_fifo,
&rscp->tpt_fifo_lock,
nr_tpt, 1, 0);
if (err)
goto tpt_err;
err = cxio_init_qpid_fifo(rdev_p);
if (err)
goto qpid_err;
err = cxio_init_resource_fifo(&rscp->cqid_fifo, &rscp->cqid_fifo_lock,
nr_cqid, 1, 0);
if (err)
goto cqid_err;
err = cxio_init_resource_fifo(&rscp->pdid_fifo, &rscp->pdid_fifo_lock,
nr_pdid, 1, 0);
if (err)
goto pdid_err;
return 0;
pdid_err:
kfifo_free(rscp->cqid_fifo);
cqid_err:
kfifo_free(rscp->qpid_fifo);
qpid_err:
kfifo_free(rscp->tpt_fifo);
tpt_err:
return -ENOMEM;
}
/*
* returns 0 if no resource available
*/
static inline u32 cxio_hal_get_resource(struct kfifo *fifo)
{
u32 entry;
if (kfifo_get(fifo, (unsigned char *) &entry, sizeof(u32)))
return entry;
else
return 0; /* fifo empty */
}
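/*
 * IDs below skip_low (1 for the tpt/cqid/pdid FIFOs) are never loaded,
 * so a return of 0 doubles as the failure sentinel.  A hypothetical
 * caller sketch, not part of this commit:
 *
 *	u32 stag_idx = cxio_hal_get_stag(rscp);
 *	if (!stag_idx)
 *		return -ENOMEM;		(FIFO empty: out of STags)
 */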
static inline void cxio_hal_put_resource(struct kfifo *fifo, u32 entry)
{
BUG_ON(kfifo_put(fifo, (unsigned char *) &entry, sizeof(u32)) == 0);
}
u32 cxio_hal_get_rhdl(void)
{
return cxio_hal_get_resource(rhdl_fifo);
}
void cxio_hal_put_rhdl(u32 rhdl)
{
cxio_hal_put_resource(rhdl_fifo, rhdl);
}
u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
{
return cxio_hal_get_resource(rscp->tpt_fifo);
}
void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
{
cxio_hal_put_resource(rscp->tpt_fifo, stag);
}
u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
{
u32 qpid = cxio_hal_get_resource(rscp->qpid_fifo);
PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
return qpid;
}
void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
{
PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
cxio_hal_put_resource(rscp->qpid_fifo, qpid);
}
u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp)
{
return cxio_hal_get_resource(rscp->cqid_fifo);
}
void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid)
{
cxio_hal_put_resource(rscp->cqid_fifo, cqid);
}
u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp)
{
return cxio_hal_get_resource(rscp->pdid_fifo);
}
void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid)
{
cxio_hal_put_resource(rscp->pdid_fifo, pdid);
}
void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
{
kfifo_free(rscp->tpt_fifo);
kfifo_free(rscp->cqid_fifo);
kfifo_free(rscp->qpid_fifo);
kfifo_free(rscp->pdid_fifo);
kfree(rscp);
}
/*
* PBL Memory Manager. Uses Linux generic allocator.
*/
#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
#define PBL_CHUNK (2 * 1024 * 1024)
u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
{
unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size);
return (u32)addr;
}
void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
{
PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size);
gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
}
int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
{
unsigned long i;
rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
if (rdev_p->pbl_pool)
for (i = rdev_p->rnic_info.pbl_base;
i <= rdev_p->rnic_info.pbl_top - PBL_CHUNK + 1;
i += PBL_CHUNK)
gen_pool_add(rdev_p->pbl_pool, i, PBL_CHUNK, -1);
return rdev_p->pbl_pool ? 0 : -ENOMEM;
}
void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
{
gen_pool_destroy(rdev_p->pbl_pool);
}
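/*
 * The pool is seeded in fixed 2MB chunks because gen_pool_add()
 * registers one contiguous region per call; allocations come back as
 * adapter addresses inside [pbl_base, pbl_top].  A hypothetical
 * round-trip sketch (assuming 8-byte PBL entries):
 *
 *	u32 pbl_addr = cxio_hal_pblpool_alloc(rdev_p, 32 * sizeof(u64));
 *	if (pbl_addr)
 *		cxio_hal_pblpool_free(rdev_p, pbl_addr, 32 * sizeof(u64));
 */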
/*
* RQT Memory Manager. Uses Linux generic allocator.
*/
#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
#define RQT_CHUNK (2 * 1024 * 1024)
u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
{
unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size << 6);
return (u32)addr;
}
void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
{
PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size << 6);
gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
}
int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p)
{
unsigned long i;
rdev_p->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
if (rdev_p->rqt_pool)
for (i = rdev_p->rnic_info.rqt_base;
i <= rdev_p->rnic_info.rqt_top - RQT_CHUNK + 1;
i += RQT_CHUNK)
gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1);
return rdev_p->rqt_pool ? 0 : -ENOMEM;
}
void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p)
{
gen_pool_destroy(rdev_p->rqt_pool);
}
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CXIO_RESOURCE_H__
#define __CXIO_RESOURCE_H__
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/genalloc.h>
#include "cxio_hal.h"
extern int cxio_hal_init_rhdl_resource(u32 nr_rhdl);
extern void cxio_hal_destroy_rhdl_resource(void);
extern int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
u32 nr_tpt, u32 nr_pbl,
u32 nr_rqt, u32 nr_qpid, u32 nr_cqid,
u32 nr_pdid);
extern u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp);
extern void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag);
extern u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp);
extern void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid);
extern u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp);
extern void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid);
extern void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp);
#define PBL_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.pbl_base )
extern int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p);
extern void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p);
extern u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size);
extern void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
#define RQT_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.rqt_base )
extern int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p);
extern void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p);
extern u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size);
extern void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
#endif
[diff collapsed]
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_verbs.h>
#include "cxgb3_offload.h"
#include "iwch_provider.h"
#include "iwch_user.h"
#include "iwch.h"
#include "iwch_cm.h"
#define DRV_VERSION "1.1"
MODULE_AUTHOR("Boyd Faulkner, Steve Wise");
MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
static void open_rnic_dev(struct t3cdev *);
static void close_rnic_dev(struct t3cdev *);
struct cxgb3_client t3c_client = {
.name = "iw_cxgb3",
.add = open_rnic_dev,
.remove = close_rnic_dev,
.handlers = t3c_handlers,
.redirect = iwch_ep_redirect
};
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(dev_mutex);
static void rnic_init(struct iwch_dev *rnicp)
{
PDBG("%s iwch_dev %p\n", __FUNCTION__, rnicp);
idr_init(&rnicp->cqidr);
idr_init(&rnicp->qpidr);
idr_init(&rnicp->mmidr);
spin_lock_init(&rnicp->lock);
rnicp->attr.vendor_id = 0x168;
rnicp->attr.vendor_part_id = 7;
rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
rnicp->attr.max_wrs = (1UL << 24) - 1;
rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
rnicp->attr.max_cqes_per_cq = (1UL << 24) - 1;
rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
rnicp->attr.mem_pgsizes_bitmask = 0x7FFF; /* 4KB-128MB */
rnicp->attr.can_resize_wq = 0;
rnicp->attr.max_rdma_reads_per_qp = 8;
rnicp->attr.max_rdma_read_resources =
rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps;
rnicp->attr.max_rdma_read_qp_depth = 8; /* IRD */
rnicp->attr.max_rdma_read_depth =
rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps;
rnicp->attr.rq_overflow_handled = 0;
rnicp->attr.can_modify_ird = 0;
rnicp->attr.can_modify_ord = 0;
rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1;
rnicp->attr.stag0_value = 1;
rnicp->attr.zbva_support = 1;
rnicp->attr.local_invalidate_fence = 1;
rnicp->attr.cq_overflow_detection = 1;
return;
}
static void open_rnic_dev(struct t3cdev *tdev)
{
struct iwch_dev *rnicp;
static int vers_printed;
PDBG("%s t3cdev %p\n", __FUNCTION__, tdev);
if (!vers_printed++)
printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
DRV_VERSION);
rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
if (!rnicp) {
printk(KERN_ERR MOD "Cannot allocate ib device\n");
return;
}
rnicp->rdev.ulp = rnicp;
rnicp->rdev.t3cdev_p = tdev;
mutex_lock(&dev_mutex);
if (cxio_rdev_open(&rnicp->rdev)) {
mutex_unlock(&dev_mutex);
printk(KERN_ERR MOD "Unable to open CXIO rdev\n");
ib_dealloc_device(&rnicp->ibdev);
return;
}
rnic_init(rnicp);
list_add_tail(&rnicp->entry, &dev_list);
mutex_unlock(&dev_mutex);
if (iwch_register_device(rnicp)) {
	printk(KERN_ERR MOD "Unable to register device\n");
	close_rnic_dev(tdev);
	return;
}
printk(KERN_INFO MOD "Initialized device %s\n",
       pci_name(rnicp->rdev.rnic_info.pdev));
return;
}
static void close_rnic_dev(struct t3cdev *tdev)
{
struct iwch_dev *dev, *tmp;
PDBG("%s t3cdev %p\n", __FUNCTION__, tdev);
mutex_lock(&dev_mutex);
list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
if (dev->rdev.t3cdev_p == tdev) {
list_del(&dev->entry);
iwch_unregister_device(dev);
cxio_rdev_close(&dev->rdev);
idr_destroy(&dev->cqidr);
idr_destroy(&dev->qpidr);
idr_destroy(&dev->mmidr);
ib_dealloc_device(&dev->ibdev);
break;
}
}
mutex_unlock(&dev_mutex);
}
static int __init iwch_init_module(void)
{
int err;
err = cxio_hal_init();
if (err)
return err;
err = iwch_cm_init();
if (err)
return err;
cxio_register_ev_cb(iwch_ev_dispatch);
cxgb3_register_client(&t3c_client);
return 0;
}
static void __exit iwch_exit_module(void)
{
cxgb3_unregister_client(&t3c_client);
cxio_unregister_ev_cb(iwch_ev_dispatch);
iwch_cm_term();
cxio_hal_exit();
}
module_init(iwch_init_module);
module_exit(iwch_exit_module);
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __IWCH_H__
#define __IWCH_H__
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <rdma/ib_verbs.h>
#include "cxio_hal.h"
#include "cxgb3_offload.h"
struct iwch_pd;
struct iwch_cq;
struct iwch_qp;
struct iwch_mr;
struct iwch_rnic_attributes {
u32 vendor_id;
u32 vendor_part_id;
u32 max_qps;
u32 max_wrs; /* Max for any SQ/RQ */
u32 max_sge_per_wr;
u32 max_sge_per_rdma_write_wr; /* for RDMA Write WR */
u32 max_cqs;
u32 max_cqes_per_cq;
u32 max_mem_regs;
u32 max_phys_buf_entries; /* for phys buf list */
u32 max_pds;
/*
* The memory page sizes supported by this RNIC.
* Bit position i in the bitmap indicates a page
* size of 4KB << i. Phys block list mode unsupported.
*/
u32 mem_pgsizes_bitmask;
u8 can_resize_wq;
/*
* The maximum number of RDMA Reads that can be outstanding
* per QP with this RNIC as the target.
*/
u32 max_rdma_reads_per_qp;
/*
* The maximum number of resources used for RDMA Reads
* by this RNIC with this RNIC as the target.
*/
u32 max_rdma_read_resources;
/*
* The max depth per QP for initiation of RDMA Read
* by this RNIC.
*/
u32 max_rdma_read_qp_depth;
/*
* The maximum depth for initiation of RDMA Read
* operations by this RNIC on all QPs
*/
u32 max_rdma_read_depth;
u8 rq_overflow_handled;
u32 can_modify_ird;
u32 can_modify_ord;
u32 max_mem_windows;
u32 stag0_value;
u8 zbva_support;
u8 local_invalidate_fence;
u32 cq_overflow_detection;
};
struct iwch_dev {
struct ib_device ibdev;
struct cxio_rdev rdev;
u32 device_cap_flags;
struct iwch_rnic_attributes attr;
struct idr cqidr;
struct idr qpidr;
struct idr mmidr;
spinlock_t lock;
struct list_head entry;
};
static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
{
return container_of(ibdev, struct iwch_dev, ibdev);
}
static inline int t3b_device(const struct iwch_dev *rhp)
{
return rhp->rdev.t3cdev_p->type == T3B;
}
static inline int t3a_device(const struct iwch_dev *rhp)
{
return rhp->rdev.t3cdev_p->type == T3A;
}
static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
{
return idr_find(&rhp->cqidr, cqid);
}
static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
{
return idr_find(&rhp->qpidr, qpid);
}
static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
{
return idr_find(&rhp->mmidr, mmid);
}
static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
void *handle, u32 id)
{
int ret;
u32 newid;
do {
if (!idr_pre_get(idr, GFP_KERNEL)) {
return -ENOMEM;
}
spin_lock_irq(&rhp->lock);
ret = idr_get_new_above(idr, handle, id, &newid);
BUG_ON(newid != id);
spin_unlock_irq(&rhp->lock);
} while (ret == -EAGAIN);
return ret;
}
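/*
 * Note: idr_get_new_above() only guarantees an id >= the requested one;
 * the BUG_ON() above therefore relies on callers never inserting a
 * given id twice (qpids, cqids and mmids are unique by construction).
 */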
static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
{
spin_lock_irq(&rhp->lock);
idr_remove(idr, id);
spin_unlock_irq(&rhp->lock);
}
extern struct cxgb3_client t3c_client;
extern cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
extern void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb);
#endif
[diff collapsed]
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _IWCH_CM_H_
#define _IWCH_CM_H_
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include "cxgb3_offload.h"
#include "iwch_provider.h"
#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"
#define MPA_MAX_PRIVATE_DATA 256
#define MPA_REV 0 /* XXX - amso1100 uses rev 0 ! */
#define MPA_REJECT 0x20
#define MPA_CRC 0x40
#define MPA_MARKERS 0x80
#define MPA_FLAGS_MASK 0xE0
#define put_ep(ep) { \
PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __FUNCTION__, __LINE__, \
ep, atomic_read(&((ep)->kref.refcount))); \
kref_put(&((ep)->kref), __free_ep); \
}
#define get_ep(ep) { \
PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __FUNCTION__, __LINE__, \
ep, atomic_read(&((ep)->kref.refcount))); \
kref_get(&((ep)->kref)); \
}
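/*
 * These wrap kref_get()/kref_put() so that every endpoint reference is
 * logged with its call site; each get_ep() must be balanced by a
 * put_ep(), and the final put releases the endpoint via __free_ep().
 */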
struct mpa_message {
u8 key[16];
u8 flags;
u8 revision;
__be16 private_data_size;
u8 private_data[0];
};
struct terminate_message {
u8 layer_etype;
u8 ecode;
__be16 hdrct_rsvd;
u8 len_hdrs[0];
};
#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
enum iwch_layers_types {
LAYER_RDMAP = 0x00,
LAYER_DDP = 0x10,
LAYER_MPA = 0x20,
RDMAP_LOCAL_CATA = 0x00,
RDMAP_REMOTE_PROT = 0x01,
RDMAP_REMOTE_OP = 0x02,
DDP_LOCAL_CATA = 0x00,
DDP_TAGGED_ERR = 0x01,
DDP_UNTAGGED_ERR = 0x02,
DDP_LLP = 0x03
};
enum iwch_rdma_ecodes {
RDMAP_INV_STAG = 0x00,
RDMAP_BASE_BOUNDS = 0x01,
RDMAP_ACC_VIOL = 0x02,
RDMAP_STAG_NOT_ASSOC = 0x03,
RDMAP_TO_WRAP = 0x04,
RDMAP_INV_VERS = 0x05,
RDMAP_INV_OPCODE = 0x06,
RDMAP_STREAM_CATA = 0x07,
RDMAP_GLOBAL_CATA = 0x08,
RDMAP_CANT_INV_STAG = 0x09,
RDMAP_UNSPECIFIED = 0xff
};
enum iwch_ddp_ecodes {
DDPT_INV_STAG = 0x00,
DDPT_BASE_BOUNDS = 0x01,
DDPT_STAG_NOT_ASSOC = 0x02,
DDPT_TO_WRAP = 0x03,
DDPT_INV_VERS = 0x04,
DDPU_INV_QN = 0x01,
DDPU_INV_MSN_NOBUF = 0x02,
DDPU_INV_MSN_RANGE = 0x03,
DDPU_INV_MO = 0x04,
DDPU_MSG_TOOBIG = 0x05,
DDPU_INV_VERS = 0x06
};
enum iwch_mpa_ecodes {
MPA_CRC_ERR = 0x02,
MPA_MARKER_ERR = 0x03
};
enum iwch_ep_state {
IDLE = 0,
LISTEN,
CONNECTING,
MPA_REQ_WAIT,
MPA_REQ_SENT,
MPA_REQ_RCVD,
MPA_REP_SENT,
FPDU_MODE,
ABORTING,
CLOSING,
MORIBUND,
DEAD,
};
struct iwch_ep_common {
struct iw_cm_id *cm_id;
struct iwch_qp *qp;
struct t3cdev *tdev;
enum iwch_ep_state state;
struct kref kref;
spinlock_t lock;
struct sockaddr_in local_addr;
struct sockaddr_in remote_addr;
wait_queue_head_t waitq;
int rpl_done;
int rpl_err;
};
struct iwch_listen_ep {
struct iwch_ep_common com;
unsigned int stid;
int backlog;
};
struct iwch_ep {
struct iwch_ep_common com;
struct iwch_ep *parent_ep;
struct timer_list timer;
unsigned int atid;
u32 hwtid;
u32 snd_seq;
struct l2t_entry *l2t;
struct dst_entry *dst;
struct sk_buff *mpa_skb;
struct iwch_mpa_attributes mpa_attr;
unsigned int mpa_pkt_len;
u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
u8 tos;
u16 emss;
u16 plen;
u32 ird;
u32 ord;
};
static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
{
return cm_id->provider_data;
}
static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
return cm_id->provider_data;
}
static inline int compute_wscale(int win)
{
int wscale = 0;
while (wscale < 14 && (65535<<wscale) < win)
wscale++;
return wscale;
}
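/*
 * Worked example: compute_wscale(262144) == 3, since 65535 << 2 =
 * 262140 still falls short of a 256KB window while 65535 << 3 = 524280
 * covers it.
 */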
/* CM prototypes */
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
int iwch_destroy_listen(struct iw_cm_id *cm_id);
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp);
int iwch_quiesce_tid(struct iwch_ep *ep);
int iwch_resume_tid(struct iwch_ep *ep);
void __free_ep(struct kref *kref);
void iwch_rearp(struct iwch_ep *ep);
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, struct l2t_entry *l2t);
int __init iwch_cm_init(void);
void __exit iwch_cm_term(void);
#endif /* _IWCH_CM_H_ */
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "iwch_provider.h"
#include "iwch.h"
/*
* Get one cq entry from cxio and map it to openib.
*
* Returns:
* 0 EMPTY;
* 1 cqe returned
* -EAGAIN caller must try again
* any other -errno fatal error
*/
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
struct ib_wc *wc)
{
struct iwch_qp *qhp = NULL;
struct t3_cqe cqe, *rd_cqe;
struct t3_wq *wq;
u32 credit = 0;
u8 cqe_flushed;
u64 cookie;
int ret = 1;
rd_cqe = cxio_next_cqe(&chp->cq);
if (!rd_cqe)
return 0;
qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
if (!qhp)
wq = NULL;
else {
spin_lock(&qhp->lock);
wq = &(qhp->wq);
}
ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
&credit);
if (t3a_device(chp->rhp) && credit) {
PDBG("%s updating %d cq credits on id %d\n", __FUNCTION__,
credit, chp->cq.cqid);
cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
}
if (ret) {
ret = -EAGAIN;
goto out;
}
ret = 1;
wc->wr_id = cookie;
wc->qp = &qhp->ibqp;
wc->vendor_err = CQE_STATUS(cqe);
PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
"lo 0x%x cookie 0x%llx\n", __FUNCTION__,
CQE_QPID(cqe), CQE_TYPE(cqe),
CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
CQE_WRID_LOW(cqe), (unsigned long long) cookie);
if (CQE_TYPE(cqe) == 0) {
if (!CQE_STATUS(cqe))
wc->byte_len = CQE_LEN(cqe);
else
wc->byte_len = 0;
wc->opcode = IB_WC_RECV;
} else {
switch (CQE_OPCODE(cqe)) {
case T3_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case T3_READ_REQ:
wc->opcode = IB_WC_RDMA_READ;
wc->byte_len = CQE_LEN(cqe);
break;
case T3_SEND:
case T3_SEND_WITH_SE:
wc->opcode = IB_WC_SEND;
break;
case T3_BIND_MW:
wc->opcode = IB_WC_BIND_MW;
break;
/* these aren't supported yet */
case T3_SEND_WITH_INV:
case T3_SEND_WITH_SE_INV:
case T3_LOCAL_INV:
case T3_FAST_REGISTER:
default:
printk(KERN_ERR MOD "Unexpected opcode %d "
"in the CQE received for QPID=0x%0x\n",
CQE_OPCODE(cqe), CQE_QPID(cqe));
ret = -EINVAL;
goto out;
}
}
if (cqe_flushed)
wc->status = IB_WC_WR_FLUSH_ERR;
else {
switch (CQE_STATUS(cqe)) {
case TPT_ERR_SUCCESS:
wc->status = IB_WC_SUCCESS;
break;
case TPT_ERR_STAG:
wc->status = IB_WC_LOC_ACCESS_ERR;
break;
case TPT_ERR_PDID:
wc->status = IB_WC_LOC_PROT_ERR;
break;
case TPT_ERR_QPID:
case TPT_ERR_ACCESS:
wc->status = IB_WC_LOC_ACCESS_ERR;
break;
case TPT_ERR_WRAP:
wc->status = IB_WC_GENERAL_ERR;
break;
case TPT_ERR_BOUND:
wc->status = IB_WC_LOC_LEN_ERR;
break;
case TPT_ERR_INVALIDATE_SHARED_MR:
case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
wc->status = IB_WC_MW_BIND_ERR;
break;
case TPT_ERR_CRC:
case TPT_ERR_MARKER:
case TPT_ERR_PDU_LEN_ERR:
case TPT_ERR_OUT_OF_RQE:
case TPT_ERR_DDP_VERSION:
case TPT_ERR_RDMA_VERSION:
case TPT_ERR_DDP_QUEUE_NUM:
case TPT_ERR_MSN:
case TPT_ERR_TBIT:
case TPT_ERR_MO:
case TPT_ERR_MSN_RANGE:
case TPT_ERR_IRD_OVERFLOW:
case TPT_ERR_OPCODE:
wc->status = IB_WC_FATAL_ERR;
break;
case TPT_ERR_SWFLUSH:
wc->status = IB_WC_WR_FLUSH_ERR;
break;
default:
printk(KERN_ERR MOD "Unexpected cqe_status 0x%x for "
"QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
ret = -EINVAL;
}
}
out:
if (wq)
spin_unlock(&qhp->lock);
return ret;
}
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct iwch_dev *rhp;
struct iwch_cq *chp;
unsigned long flags;
int npolled;
int err = 0;
chp = to_iwch_cq(ibcq);
rhp = chp->rhp;
spin_lock_irqsave(&chp->lock, flags);
for (npolled = 0; npolled < num_entries; ++npolled) {
#ifdef DEBUG
int i=0;
#endif
/*
* Because T3 can post CQEs that are _not_ associated
* with a WR, we might have to poll again after removing
* one of these.
*/
do {
err = iwch_poll_cq_one(rhp, chp, wc + npolled);
#ifdef DEBUG
BUG_ON(++i > 1000);
#endif
} while (err == -EAGAIN);
if (err <= 0)
break;
}
spin_unlock_irqrestore(&chp->lock, flags);
if (err < 0)
return err;
else {
return npolled;
}
}
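/*
 * An illustrative consumer of the verbs poll entry point above (not
 * part of this commit): drain every available completion, relying on
 * iwch_poll_cq() to hide the -EAGAIN retries internally.
 */
#if 0	/* example only */
static void example_drain_cq(struct ib_cq *ibcq)
{
	struct ib_wc wc;
	int n;

	while ((n = ib_poll_cq(ibcq, 1, &wc)) > 0)
		;	/* consume wc.wr_id / wc.status / wc.opcode here */
	if (n < 0)
		printk(KERN_ERR MOD "poll failed %d\n", n);
}
#endif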
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/mman.h>
#include <net/sock.h>
#include "iwch_provider.h"
#include "iwch.h"
#include "iwch_cm.h"
#include "cxio_hal.h"
#include "cxio_wr.h"
static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
struct respQ_msg_t *rsp_msg,
enum ib_event_type ib_event,
int send_term)
{
struct ib_event event;
struct iwch_qp_attributes attrs;
struct iwch_qp *qhp;
printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
"type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
spin_lock(&rnicp->lock);
qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
if (!qhp) {
printk(KERN_ERR "%s unaffiliated error 0x%x qpid 0x%x\n",
__FUNCTION__, CQE_STATUS(rsp_msg->cqe),
CQE_QPID(rsp_msg->cqe));
spin_unlock(&rnicp->lock);
return;
}
if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
(qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
PDBG("%s AE received after RTS - "
"qp state %d qpid 0x%x status 0x%x\n", __FUNCTION__,
qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
spin_unlock(&rnicp->lock);
return;
}
atomic_inc(&qhp->refcnt);
spin_unlock(&rnicp->lock);
event.event = ib_event;
event.device = chp->ibcq.device;
if (ib_event == IB_EVENT_CQ_ERR)
event.element.cq = &chp->ibcq;
else
event.element.qp = &qhp->ibqp;
if (qhp->ibqp.event_handler)
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
if (qhp->attr.state == IWCH_QP_STATE_RTS) {
attrs.next_state = IWCH_QP_STATE_TERMINATE;
iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
&attrs, 1);
if (send_term)
iwch_post_terminate(qhp, rsp_msg);
}
if (atomic_dec_and_test(&qhp->refcnt))
wake_up(&qhp->wait);
}
void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
{
struct iwch_dev *rnicp;
struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
struct iwch_cq *chp;
struct iwch_qp *qhp;
u32 cqid = RSPQ_CQID(rsp_msg);
rnicp = (struct iwch_dev *) rdev_p->ulp;
spin_lock(&rnicp->lock);
chp = get_chp(rnicp, cqid);
qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
if (!chp || !qhp) {
printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
"status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x \n",
cqid, CQE_QPID(rsp_msg->cqe),
CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
CQE_WRID_LOW(rsp_msg->cqe));
spin_unlock(&rnicp->lock);
goto out;
}
iwch_qp_add_ref(&qhp->ibqp);
atomic_inc(&chp->refcnt);
spin_unlock(&rnicp->lock);
/*
* 1) completion of our sending a TERMINATE.
* 2) incoming TERMINATE message.
*/
if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
(CQE_STATUS(rsp_msg->cqe) == 0)) {
if (SQ_TYPE(rsp_msg->cqe)) {
PDBG("%s QPID 0x%x ep %p disconnecting\n",
__FUNCTION__, qhp->wq.qpid, qhp->ep);
iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
} else {
PDBG("%s post REQ_ERR AE QPID 0x%x\n", __FUNCTION__,
qhp->wq.qpid);
post_qp_event(rnicp, chp, rsp_msg,
IB_EVENT_QP_REQ_ERR, 0);
iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
}
goto done;
}
/* Bad incoming Read request */
if (SQ_TYPE(rsp_msg->cqe) &&
(CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
goto done;
}
/* Bad incoming write */
if (RQ_TYPE(rsp_msg->cqe) &&
(CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
goto done;
}
switch (CQE_STATUS(rsp_msg->cqe)) {
/* Completion Events */
case TPT_ERR_SUCCESS:
/*
* Confirm the destination entry if this is a RECV completion.
*/
if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
dst_confirm(qhp->ep->dst);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
break;
case TPT_ERR_STAG:
case TPT_ERR_PDID:
case TPT_ERR_QPID:
case TPT_ERR_ACCESS:
case TPT_ERR_WRAP:
case TPT_ERR_BOUND:
case TPT_ERR_INVALIDATE_SHARED_MR:
case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
"type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
break;
/* Device Fatal Errors */
case TPT_ERR_ECC:
case TPT_ERR_ECC_PSTAG:
case TPT_ERR_INTERNAL_ERR:
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
break;
/* QP Fatal Errors */
case TPT_ERR_OUT_OF_RQE:
case TPT_ERR_PBL_ADDR_BOUND:
case TPT_ERR_CRC:
case TPT_ERR_MARKER:
case TPT_ERR_PDU_LEN_ERR:
case TPT_ERR_DDP_VERSION:
case TPT_ERR_RDMA_VERSION:
case TPT_ERR_OPCODE:
case TPT_ERR_DDP_QUEUE_NUM:
case TPT_ERR_MSN:
case TPT_ERR_TBIT:
case TPT_ERR_MO:
case TPT_ERR_MSN_GAP:
case TPT_ERR_MSN_RANGE:
case TPT_ERR_RQE_ADDR_BOUND:
case TPT_ERR_IRD_OVERFLOW:
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
break;
default:
printk(KERN_ERR MOD "Unknown T3 status 0x%x QPID 0x%x\n",
CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
break;
}
done:
if (atomic_dec_and_test(&chp->refcnt))
wake_up(&chp->wait);
iwch_qp_rem_ref(&qhp->ibqp);
out:
dev_kfree_skb_irq(skb);
}
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <asm/byteorder.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
struct iwch_mr *mhp,
int shift,
__be64 *page_list)
{
u32 stag;
u32 mmid;
if (cxio_register_phys_mem(&rhp->rdev,
&stag, mhp->attr.pdid,
mhp->attr.perms,
mhp->attr.zbva,
mhp->attr.va_fbo,
mhp->attr.len,
shift-12,
page_list,
&mhp->attr.pbl_size, &mhp->attr.pbl_addr))
return -ENOMEM;
mhp->attr.state = 1;
mhp->attr.stag = stag;
mmid = stag >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
insert_handle(rhp, &rhp->mmidr, mhp, mmid);
PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp);
return 0;
}
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
struct iwch_mr *mhp,
int shift,
__be64 *page_list,
int npages)
{
u32 stag;
u32 mmid;
/* We could support this... */
if (npages > mhp->attr.pbl_size)
return -ENOMEM;
stag = mhp->attr.stag;
if (cxio_reregister_phys_mem(&rhp->rdev,
&stag, mhp->attr.pdid,
mhp->attr.perms,
mhp->attr.zbva,
mhp->attr.va_fbo,
mhp->attr.len,
shift-12,
page_list,
&mhp->attr.pbl_size, &mhp->attr.pbl_addr))
return -ENOMEM;
mhp->attr.state = 1;
mhp->attr.stag = stag;
mmid = stag >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
insert_handle(rhp, &rhp->mmidr, mhp, mmid);
PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp);
return 0;
}
int build_phys_page_list(struct ib_phys_buf *buffer_list,
int num_phys_buf,
u64 *iova_start,
u64 *total_size,
int *npages,
int *shift,
__be64 **page_list)
{
u64 mask;
int i, j, n;
mask = 0;
*total_size = 0;
for (i = 0; i < num_phys_buf; ++i) {
if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
return -EINVAL;
if (i != 0 && i != num_phys_buf - 1 &&
(buffer_list[i].size & ~PAGE_MASK))
return -EINVAL;
*total_size += buffer_list[i].size;
if (i > 0)
mask |= buffer_list[i].addr;
}
if (*total_size > 0xFFFFFFFFULL)
return -ENOMEM;
/* Find largest page shift we can use to cover buffers */
for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
if (num_phys_buf > 1) {
if ((1ULL << *shift) & mask)
break;
} else
if (1ULL << *shift >=
buffer_list[0].size +
(buffer_list[0].addr & ((1ULL << *shift) - 1)))
break;
buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
buffer_list[0].addr &= ~0ull << *shift;
*npages = 0;
for (i = 0; i < num_phys_buf; ++i)
*npages += (buffer_list[i].size +
(1ULL << *shift) - 1) >> *shift;
if (!*npages)
return -EINVAL;
*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
if (!*page_list)
return -ENOMEM;
n = 0;
for (i = 0; i < num_phys_buf; ++i)
for (j = 0;
j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
++j)
(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
((u64) j << *shift));
PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
__FUNCTION__, (unsigned long long) *iova_start,
(unsigned long long) mask, *shift, (unsigned long long) *total_size,
*npages);
return 0;
}
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __IWCH_PROVIDER_H__
#define __IWCH_PROVIDER_H__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <asm/types.h>
#include "t3cdev.h"
#include "iwch.h"
#include "cxio_wr.h"
#include "cxio_hal.h"
struct iwch_pd {
struct ib_pd ibpd;
u32 pdid;
struct iwch_dev *rhp;
};
static inline struct iwch_pd *to_iwch_pd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct iwch_pd, ibpd);
}
struct tpt_attributes {
u32 stag;
u32 state:1;
u32 type:2;
u32 rsvd:1;
enum tpt_mem_perm perms;
	u32 remote_invalidate_disable:1;
u32 zbva:1;
u32 mw_bind_enable:1;
u32 page_size:5;
u32 pdid;
u32 qpid;
u32 pbl_addr;
u32 len;
u64 va_fbo;
u32 pbl_size;
};
struct iwch_mr {
struct ib_mr ibmr;
struct iwch_dev *rhp;
u64 kva;
struct tpt_attributes attr;
};
typedef struct iwch_mw iwch_mw_handle;
static inline struct iwch_mr *to_iwch_mr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct iwch_mr, ibmr);
}
struct iwch_mw {
struct ib_mw ibmw;
struct iwch_dev *rhp;
u64 kva;
struct tpt_attributes attr;
};
static inline struct iwch_mw *to_iwch_mw(struct ib_mw *ibmw)
{
return container_of(ibmw, struct iwch_mw, ibmw);
}
struct iwch_cq {
struct ib_cq ibcq;
struct iwch_dev *rhp;
struct t3_cq cq;
spinlock_t lock;
atomic_t refcnt;
wait_queue_head_t wait;
u32 __user *user_rptr_addr;
};
static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct iwch_cq, ibcq);
}
enum IWCH_QP_FLAGS {
QP_QUIESCED = 0x01
};
struct iwch_mpa_attributes {
	u8 recv_marker_enabled;			/* MPA markers on receive */
	u8 xmit_marker_enabled;			/* MPA markers on transmit */
u8 crc_enabled;
u8 version; /* 0 or 1 */
};
struct iwch_qp_attributes {
u32 scq;
u32 rcq;
u32 sq_num_entries;
u32 rq_num_entries;
u32 sq_max_sges;
u32 sq_max_sges_rdma_write;
u32 rq_max_sges;
u32 state;
u8 enable_rdma_read;
u8 enable_rdma_write; /* enable inbound Read Resp. */
u8 enable_bind;
u8 enable_mmid0_fastreg; /* Enable STAG0 + Fast-register */
	u32 max_ord;
	u32 max_ird;
	u32 pd;				/* IN */
	/*
	 * Next QP state.  If the current state is specified, only the
	 * QP attributes will be modified.
	 */
	u32 next_state;
char terminate_buffer[52];
u32 terminate_msg_len;
u8 is_terminate_local;
struct iwch_mpa_attributes mpa_attr; /* IN-OUT */
struct iwch_ep *llp_stream_handle;
char *stream_msg_buf; /* Last stream msg. before Idle -> RTS */
u32 stream_msg_buf_len; /* Only on Idle -> RTS */
};
struct iwch_qp {
struct ib_qp ibqp;
struct iwch_dev *rhp;
struct iwch_ep *ep;
struct iwch_qp_attributes attr;
struct t3_wq wq;
spinlock_t lock;
atomic_t refcnt;
wait_queue_head_t wait;
enum IWCH_QP_FLAGS flags;
struct timer_list timer;
};
static inline int qp_quiesced(struct iwch_qp *qhp)
{
return qhp->flags & QP_QUIESCED;
}
static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct iwch_qp, ibqp);
}
void iwch_qp_add_ref(struct ib_qp *qp);
void iwch_qp_rem_ref(struct ib_qp *qp);
struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn);
struct iwch_ucontext {
struct ib_ucontext ibucontext;
struct cxio_ucontext uctx;
u32 key;
spinlock_t mmap_lock;
struct list_head mmaps;
};
static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c)
{
return container_of(c, struct iwch_ucontext, ibucontext);
}
struct iwch_mm_entry {
struct list_head entry;
u64 addr;
u32 key;
unsigned len;
};
static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
u32 key, unsigned len)
{
struct list_head *pos, *nxt;
struct iwch_mm_entry *mm;
spin_lock(&ucontext->mmap_lock);
list_for_each_safe(pos, nxt, &ucontext->mmaps) {
mm = list_entry(pos, struct iwch_mm_entry, entry);
if (mm->key == key && mm->len == len) {
list_del_init(&mm->entry);
spin_unlock(&ucontext->mmap_lock);
PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
key, (unsigned long long) mm->addr, mm->len);
return mm;
}
}
spin_unlock(&ucontext->mmap_lock);
return NULL;
}
static inline void insert_mmap(struct iwch_ucontext *ucontext,
struct iwch_mm_entry *mm)
{
spin_lock(&ucontext->mmap_lock);
PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
mm->key, (unsigned long long) mm->addr, mm->len);
list_add_tail(&mm->entry, &ucontext->mmaps);
spin_unlock(&ucontext->mmap_lock);
}
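/*
 * Sketch of the intended mmap flow, inferred from the fields above and
 * the create_cq/create_qp responses in iwch_user.h: at creation time
 * the driver picks a key, records (key, addr, len) via insert_mmap()
 * and returns the key to userspace; userspace then mmap()s the device
 * file using that key as the offset, and the driver's mmap handler
 * calls remove_mmap() to translate the offset back into the address
 * to map.
 */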
enum iwch_qp_attr_mask {
IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
IWCH_QP_ATTR_MAX_ORD = 1 << 11,
IWCH_QP_ATTR_MAX_IRD = 1 << 12,
IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
IWCH_QP_ATTR_MAX_ORD |
IWCH_QP_ATTR_MAX_IRD |
IWCH_QP_ATTR_LLP_STREAM_HANDLE |
IWCH_QP_ATTR_STREAM_MSG_BUFFER |
IWCH_QP_ATTR_MPA_ATTR |
IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
};
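/*
 * Modify a QP's attributes and/or state.  "mask" selects which fields
 * of "attrs" to apply; "internal" presumably distinguishes driver-
 * initiated transitions (e.g. from connection-manager events) from
 * consumer verbs calls.
 */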
int iwch_modify_qp(struct iwch_dev *rhp,
struct iwch_qp *qhp,
enum iwch_qp_attr_mask mask,
struct iwch_qp_attributes *attrs,
int internal);
enum iwch_qp_state {
IWCH_QP_STATE_IDLE,
IWCH_QP_STATE_RTS,
IWCH_QP_STATE_ERROR,
IWCH_QP_STATE_TERMINATE,
IWCH_QP_STATE_CLOSING,
IWCH_QP_STATE_TOT
};
static inline int iwch_convert_state(enum ib_qp_state ib_state)
{
switch (ib_state) {
case IB_QPS_RESET:
case IB_QPS_INIT:
return IWCH_QP_STATE_IDLE;
case IB_QPS_RTS:
return IWCH_QP_STATE_RTS;
case IB_QPS_SQD:
return IWCH_QP_STATE_CLOSING;
case IB_QPS_SQE:
return IWCH_QP_STATE_TERMINATE;
case IB_QPS_ERR:
return IWCH_QP_STATE_ERROR;
default:
return -1;
}
}
enum iwch_mem_perms {
IWCH_MEM_ACCESS_LOCAL_READ = 1 << 0,
IWCH_MEM_ACCESS_LOCAL_WRITE = 1 << 1,
IWCH_MEM_ACCESS_REMOTE_READ = 1 << 2,
IWCH_MEM_ACCESS_REMOTE_WRITE = 1 << 3,
IWCH_MEM_ACCESS_ATOMICS = 1 << 4,
IWCH_MEM_ACCESS_BINDING = 1 << 5,
IWCH_MEM_ACCESS_LOCAL =
(IWCH_MEM_ACCESS_LOCAL_READ | IWCH_MEM_ACCESS_LOCAL_WRITE),
IWCH_MEM_ACCESS_REMOTE =
(IWCH_MEM_ACCESS_REMOTE_WRITE | IWCH_MEM_ACCESS_REMOTE_READ)
/* cannot go beyond 1 << 31 */
} __attribute__ ((packed));
static inline u32 iwch_convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? IWCH_MEM_ACCESS_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? IWCH_MEM_ACCESS_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? IWCH_MEM_ACCESS_LOCAL_WRITE : 0) |
	       (acc & IB_ACCESS_MW_BIND ? IWCH_MEM_ACCESS_BINDING : 0) |
	       IWCH_MEM_ACCESS_LOCAL_READ;
}
enum iwch_mmid_state {
IWCH_STAG_STATE_VALID,
IWCH_STAG_STATE_INVALID
};
enum iwch_qp_query_flags {
IWCH_QP_QUERY_CONTEXT_NONE = 0x0, /* No ctx; Only attrs */
IWCH_QP_QUERY_CONTEXT_GET = 0x1, /* Get ctx + attrs */
IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2, /* Not Supported */
/*
* Quiesce QP context; Consumer
* will NOT replay outstanding WR
*/
IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
IWCH_QP_QUERY_TEST_USERWRITE = 0x32 /* Test special */
};
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr);
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
int iwch_bind_mw(struct ib_qp *qp,
struct ib_mw *mw,
struct ib_mw_bind *mw_bind);
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
int iwch_quiesce_qps(struct iwch_cq *chp);
int iwch_resume_qps(struct iwch_cq *chp);
void stop_read_rep_timer(struct iwch_qp *qhp);
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
struct iwch_mr *mhp,
int shift,
__be64 *page_list);
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
struct iwch_mr *mhp,
int shift,
__be64 *page_list,
int npages);
int build_phys_page_list(struct ib_phys_buf *buffer_list,
int num_phys_buf,
u64 *iova_start,
u64 *total_size,
int *npages,
int *shift,
__be64 **page_list);
#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"
#endif
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __IWCH_USER_H__
#define __IWCH_USER_H__
#define IWCH_UVERBS_ABI_VERSION 1
/*
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
* In particular do not use pointer types -- pass pointers in __u64
* instead.
*/
struct iwch_create_cq_req {
__u64 user_rptr_addr;
};
struct iwch_create_cq_resp {
__u64 key;
__u32 cqid;
__u32 size_log2;
};
struct iwch_create_qp_resp {
__u64 key;
__u64 db_key;
__u32 qpid;
__u32 size_log2;
__u32 sq_size_log2;
__u32 rq_size_log2;
};
struct iwch_reg_user_mr_resp {
__u32 pbl_addr;
};
#endif