Commit 04d740d5 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
 - One minor fix to the ib core
 - Four minor fixes to the Mellanox drivers
 - Remove three deprecated drivers from staging/rdma now that all of
   Greg's queued changes to them are merged

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  staging/rdma: remove deprecated ipath driver
  staging/rdma: remove deprecated ehca driver
  staging/rdma: remove deprecated amso1100 driver
  IB/core: Set correct payload length for RoCEv2 over IPv6
  IB/mlx5: Use MLX5_GET to correctly get end of padding mode
  IB/mlx5: Fix use of null pointer PD
  IB/mlx5: Fix reqlen validation in mlx5_ib_alloc_ucontext
  IB/mlx5: Add CREATE_CQ and CREATE_QP to uverbs_ex_cmd_mask
parents b37a05c0 b85d9905
......@@ -686,13 +686,6 @@ M: Michael Hanselmann <linux-kernel@hansmi.ch>
S: Supported
F: drivers/macintosh/ams/
AMSO1100 RNIC DRIVER
M: Tom Tucker <tom@opengridcomputing.com>
M: Steve Wise <swise@opengridcomputing.com>
L: linux-rdma@vger.kernel.org
S: Maintained
F: drivers/infiniband/hw/amso1100/
ANALOG DEVICES INC AD9389B DRIVER
M: Hans Verkuil <hans.verkuil@cisco.com>
L: linux-media@vger.kernel.org
......@@ -4198,13 +4191,6 @@ W: http://aeschi.ch.eu.org/efs/
S: Orphan
F: fs/efs/
EHCA (IBM GX bus InfiniBand adapter) DRIVER
M: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
M: Christoph Raisch <raisch@de.ibm.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/ehca/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
M: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
......@@ -5823,12 +5809,6 @@ M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
S: Maintained
F: net/ipv4/netfilter/ipt_MASQUERADE.c
IPATH DRIVER
M: Mike Marciniszyn <infinipath@intel.com>
L: linux-rdma@vger.kernel.org
S: Maintained
F: drivers/staging/rdma/ipath/
IPMI SUBSYSTEM
M: Corey Minyard <minyard@acm.org>
L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
......
......@@ -322,6 +322,8 @@ int ib_ud_header_init(int payload_bytes,
int immediate_present,
struct ib_ud_header *header)
{
size_t udp_bytes = udp_present ? IB_UDP_BYTES : 0;
grh_present = grh_present && !ip_version;
memset(header, 0, sizeof *header);
......@@ -353,7 +355,8 @@ int ib_ud_header_init(int payload_bytes,
if (ip_version == 6 || grh_present) {
header->grh.ip_version = 6;
header->grh.payload_length =
cpu_to_be16((IB_BTH_BYTES +
cpu_to_be16((udp_bytes +
IB_BTH_BYTES +
IB_DETH_BYTES +
payload_bytes +
4 + /* ICRC */
......@@ -362,8 +365,6 @@ int ib_ud_header_init(int payload_bytes,
}
if (ip_version == 4) {
int udp_bytes = udp_present ? IB_UDP_BYTES : 0;
header->ip4.ver = 4; /* version 4 */
header->ip4.hdr_len = 5; /* 5 words */
header->ip4.tot_len =
......
......@@ -844,6 +844,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
int err;
int i;
size_t reqlen;
size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
max_cqe_version);
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
......@@ -854,7 +856,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
ver = 0;
else if (reqlen >= sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
else if (reqlen >= min_req_v2)
ver = 2;
else
return ERR_PTR(-EINVAL);
......@@ -2214,7 +2216,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
(1ull << IB_USER_VERBS_CMD_OPEN_QP);
dev->ib_dev.uverbs_ex_cmd_mask =
(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
......
......@@ -1036,7 +1036,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, end_padding_mode,
MLX5_GET64(qpc, qpc, end_padding_mode));
MLX5_GET(qpc, qpc, end_padding_mode));
MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
......@@ -1615,15 +1615,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
if (pd) {
dev = to_mdev(pd->device);
} else {
/* being cautious here */
if (init_attr->qp_type != IB_QPT_XRC_TGT &&
init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
pr_warn("%s: no PD for transport %s\n", __func__,
ib_qp_type_str(init_attr->qp_type));
return ERR_PTR(-EINVAL);
}
dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
if (!pd->uobject) {
......@@ -1634,6 +1625,15 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
}
} else {
/* being cautious here */
if (init_attr->qp_type != IB_QPT_XRC_TGT &&
init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
pr_warn("%s: no PD for transport %s\n", __func__,
ib_qp_type_str(init_attr->qp_type));
return ERR_PTR(-EINVAL);
}
dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
}
switch (init_attr->qp_type) {
......
......@@ -22,12 +22,6 @@ menuconfig STAGING_RDMA
# Please keep entries in alphabetic order
if STAGING_RDMA
source "drivers/staging/rdma/amso1100/Kconfig"
source "drivers/staging/rdma/ehca/Kconfig"
source "drivers/staging/rdma/hfi1/Kconfig"
source "drivers/staging/rdma/ipath/Kconfig"
endif
# Entries for RDMA_STAGING tree
obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/
obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
ccflags-$(CONFIG_INFINIBAND_AMSO1100_DEBUG) := -DDEBUG
obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
config INFINIBAND_AMSO1100
tristate "Ammasso 1100 HCA support"
depends on PCI && INET
---help---
This is a low-level driver for the Ammasso 1100 host
channel adapter (HCA).
config INFINIBAND_AMSO1100_DEBUG
bool "Verbose debugging output"
depends on INFINIBAND_AMSO1100
default n
---help---
This option causes the amso1100 driver to produce a bunch of
debug messages. Select this if you are developing the driver
or trying to diagnose a problem.
7/2015
The amso1100 driver has been deprecated and moved to drivers/staging.
It will be removed in the 4.6 merge window.
This diff is collapsed.
This diff is collapsed.
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "c2.h"
#include <rdma/iw_cm.h>
#include "c2_status.h"
#include "c2_ae.h"
/*
 * Map an adapter connection-status code onto a negative errno value
 * (0 for success).  Unknown codes are logged and reported as -EIO.
 */
static int c2_convert_cm_status(u32 status)
{
	switch (status) {
	case C2_CONN_STATUS_SUCCESS:
		return 0;
	case C2_CONN_STATUS_REJECTED:
		return -ENETRESET;
	case C2_CONN_STATUS_REFUSED:
		return -ECONNREFUSED;
	case C2_CONN_STATUS_TIMEDOUT:
		return -ETIMEDOUT;
	case C2_CONN_STATUS_NETUNREACH:
		return -ENETUNREACH;
	case C2_CONN_STATUS_HOSTUNREACH:
		return -EHOSTUNREACH;
	case C2_CONN_STATUS_INVALID_RNIC:
	case C2_CONN_STATUS_INVALID_QP:
	case C2_CONN_STATUS_INVALID_QP_STATE:
		/* all three invalid-handle variants collapse to -EINVAL */
		return -EINVAL;
	case C2_CONN_STATUS_ADDR_NOT_AVAIL:
		return -EADDRNOTAVAIL;
	default:
		printk(KERN_ERR PFX
		       "%s - Unable to convert CM status: %d\n",
		       __func__, status);
		return -EIO;
	}
}
/*
 * Translate an asynchronous event id into its symbolic name for debug
 * logging.  Ids outside the CCAE_* range yield "<invalid event>".
 */
static const char* to_event_str(int event)
{
	static const char * const event_names[] = {
		"CCAE_REMOTE_SHUTDOWN",
		"CCAE_ACTIVE_CONNECT_RESULTS",
		"CCAE_CONNECTION_REQUEST",
		"CCAE_LLP_CLOSE_COMPLETE",
		"CCAE_TERMINATE_MESSAGE_RECEIVED",
		"CCAE_LLP_CONNECTION_RESET",
		"CCAE_LLP_CONNECTION_LOST",
		"CCAE_LLP_SEGMENT_SIZE_INVALID",
		"CCAE_LLP_INVALID_CRC",
		"CCAE_LLP_BAD_FPDU",
		"CCAE_INVALID_DDP_VERSION",
		"CCAE_INVALID_RDMA_VERSION",
		"CCAE_UNEXPECTED_OPCODE",
		"CCAE_INVALID_DDP_QUEUE_NUMBER",
		"CCAE_RDMA_READ_NOT_ENABLED",
		"CCAE_RDMA_WRITE_NOT_ENABLED",
		"CCAE_RDMA_READ_TOO_SMALL",
		"CCAE_NO_L_BIT",
		"CCAE_TAGGED_INVALID_STAG",
		"CCAE_TAGGED_BASE_BOUNDS_VIOLATION",
		"CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION",
		"CCAE_TAGGED_INVALID_PD",
		"CCAE_WRAP_ERROR",
		"CCAE_BAD_CLOSE",
		"CCAE_BAD_LLP_CLOSE",
		"CCAE_INVALID_MSN_RANGE",
		"CCAE_INVALID_MSN_GAP",
		"CCAE_IRRQ_OVERFLOW",
		"CCAE_IRRQ_MSN_GAP",
		"CCAE_IRRQ_MSN_RANGE",
		"CCAE_IRRQ_INVALID_STAG",
		"CCAE_IRRQ_BASE_BOUNDS_VIOLATION",
		"CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION",
		"CCAE_IRRQ_INVALID_PD",
		"CCAE_IRRQ_WRAP_ERROR",
		"CCAE_CQ_SQ_COMPLETION_OVERFLOW",
		"CCAE_CQ_RQ_COMPLETION_ERROR",
		"CCAE_QP_SRQ_WQE_ERROR",
		"CCAE_QP_LOCAL_CATASTROPHIC_ERROR",
		"CCAE_CQ_OVERFLOW",
		"CCAE_CQ_OPERATION_ERROR",
		"CCAE_SRQ_LIMIT_REACHED",
		"CCAE_QP_RQ_LIMIT_REACHED",
		"CCAE_SRQ_CATASTROPHIC_ERROR",
		"CCAE_RNIC_CATASTROPHIC_ERROR"
	};

	/* The table is indexed relative to the first event id. */
	if (event < CCAE_REMOTE_SHUTDOWN ||
	    event > CCAE_RNIC_CATASTROPHIC_ERROR)
		return "<invalid event>";

	return event_names[event - CCAE_REMOTE_SHUTDOWN];
}
/*
 * Translate a QP state code into its symbolic name for debug logging.
 */
static const char *to_qp_state_str(int state)
{
	switch (state) {
	case C2_QP_STATE_IDLE:		return "C2_QP_STATE_IDLE";
	case C2_QP_STATE_CONNECTING:	return "C2_QP_STATE_CONNECTING";
	case C2_QP_STATE_RTS:		return "C2_QP_STATE_RTS";
	case C2_QP_STATE_CLOSING:	return "C2_QP_STATE_CLOSING";
	case C2_QP_STATE_TERMINATE:	return "C2_QP_STATE_TERMINATE";
	case C2_QP_STATE_ERROR:		return "C2_QP_STATE_ERROR";
	default:			return "<invalid QP state>";
	}
}
/*
 * Dispatch one asynchronous event from the adapter's AE message queue.
 *
 * Consumes a single work-request message from c2dev->qptr_array[mq_index],
 * decodes which resource it refers to (QP, EP or CQ), and delivers the
 * corresponding iw_cm or ib_event callback.  The MQ slot is always
 * released via c2_mq_free() before returning.
 */
void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
{
	struct c2_mq *mq = c2dev->qptr_array[mq_index];
	union c2wr *wr;
	void *resource_user_context;
	struct iw_cm_event cm_event;
	struct ib_event ib_event;
	enum c2_resource_indicator resource_indicator;
	enum c2_event_id event_id;
	unsigned long flags;
	int status;
	/* cm_event's address fields are accessed as IPv4 sockaddrs below;
	 * this driver only fills in IPv4 addresses. */
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_event.local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_event.remote_addr;

	/*
	 * retrieve the message
	 */
	wr = c2_mq_consume(mq);
	if (!wr)
		return;

	memset(&ib_event, 0, sizeof(ib_event));
	memset(&cm_event, 0, sizeof(cm_event));

	event_id = c2_wr_get_id(wr);
	resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
	/* user_context round-trips through the adapter as a 64-bit value;
	 * it is the host pointer stored when the resource was created. */
	resource_user_context =
	    (void *) (unsigned long) wr->ae.ae_generic.user_context;

	status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));

	pr_debug("event received c2_dev=%p, event_id=%d, "
		"resource_indicator=%d, user_context=%p, status = %d\n",
		c2dev, event_id, resource_indicator, resource_user_context,
		status);

	switch (resource_indicator) {
	case C2_RES_IND_QP:{
		struct c2_qp *qp = resource_user_context;
		struct iw_cm_id *cm_id = qp->cm_id;
		struct c2wr_ae_active_connect_results *res;

		if (!cm_id) {
			/* QP events without a cm_id have nowhere to go;
			 * still free the MQ slot via ignore_it. */
			pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
				qp);
			goto ignore_it;
		}
		pr_debug("%s: event = %s, user_context=%llx, "
			"resource_type=%x, "
			"resource=%x, qp_state=%s\n",
			__func__,
			to_event_str(event_id),
			(unsigned long long) wr->ae.ae_generic.user_context,
			be32_to_cpu(wr->ae.ae_generic.resource_type),
			be32_to_cpu(wr->ae.ae_generic.resource),
			to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));

		/* Track the adapter's view of the QP state. */
		c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));

		switch (event_id) {
		case CCAE_ACTIVE_CONNECT_RESULTS:
			res = &wr->ae.ae_active_connect_results;
			cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
			/* addresses/ports are copied without byte-swapping —
			 * presumably already network byte order; confirm
			 * against the adapter wire format. */
			laddr->sin_addr.s_addr = res->laddr;
			raddr->sin_addr.s_addr = res->raddr;
			laddr->sin_port = res->lport;
			raddr->sin_port = res->rport;
			if (status == 0) {
				cm_event.private_data_len =
					be32_to_cpu(res->private_data_length);
				cm_event.private_data = res->private_data;
			} else {
				/* Connect failed: drop our reference on the
				 * cm_id under the QP lock before reporting. */
				spin_lock_irqsave(&qp->lock, flags);
				if (qp->cm_id) {
					qp->cm_id->rem_ref(qp->cm_id);
					qp->cm_id = NULL;
				}
				spin_unlock_irqrestore(&qp->lock, flags);
				cm_event.private_data_len = 0;
				cm_event.private_data = NULL;
			}
			if (cm_id->event_handler)
				cm_id->event_handler(cm_id, &cm_event);
			break;
		case CCAE_TERMINATE_MESSAGE_RECEIVED:
		case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
			/* Fatal QP conditions are reported through the
			 * ib_event path rather than iw_cm. */
			ib_event.device = &c2dev->ibdev;
			ib_event.element.qp = &qp->ibqp;
			ib_event.event = IB_EVENT_QP_REQ_ERR;

			if (qp->ibqp.event_handler)
				qp->ibqp.event_handler(&ib_event,
						       qp->ibqp.
						       qp_context);
			break;
		case CCAE_BAD_CLOSE:
		case CCAE_LLP_CLOSE_COMPLETE:
		case CCAE_LLP_CONNECTION_RESET:
		case CCAE_LLP_CONNECTION_LOST:
			/* 0x6b6b6b6b matches the slab poison pattern —
			 * this looks like a guard against calling through
			 * an already-freed cm_id; confirm before relying. */
			BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b);

			/* Drop the QP's cm_id reference before delivering
			 * the CLOSE event. */
			spin_lock_irqsave(&qp->lock, flags);
			if (qp->cm_id) {
				qp->cm_id->rem_ref(qp->cm_id);
				qp->cm_id = NULL;
			}
			spin_unlock_irqrestore(&qp->lock, flags);
			cm_event.event = IW_CM_EVENT_CLOSE;
			cm_event.status = 0;
			if (cm_id->event_handler)
				cm_id->event_handler(cm_id, &cm_event);
			break;
		default:
			/* NOTE(review): BUG_ON(1) makes the pr_debug below
			 * unreachable; an unexpected event id halts here. */
			BUG_ON(1);
			pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
				"CM_ID=%p\n",
				__func__, __LINE__,
				event_id, qp, cm_id);
			break;
		}
		break;
	}

	case C2_RES_IND_EP:{
		/* Listener endpoint: user_context is the listening cm_id,
		 * and the only expected event is an incoming connect. */
		struct c2wr_ae_connection_request *req =
			&wr->ae.ae_connection_request;
		struct iw_cm_id *cm_id =
			resource_user_context;

		pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
		if (event_id != CCAE_CONNECTION_REQUEST) {
			pr_debug("%s: Invalid event_id: %d\n",
				__func__, event_id);
			break;
		}
		cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
		/* cr_handle identifies this request when the app later
		 * accepts or rejects it. */
		cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
		laddr->sin_addr.s_addr = req->laddr;
		raddr->sin_addr.s_addr = req->raddr;
		laddr->sin_port = req->lport;
		raddr->sin_port = req->rport;
		cm_event.private_data_len =
			be32_to_cpu(req->private_data_length);
		cm_event.private_data = req->private_data;
		/*
		 * Until ird/ord negotiation via MPAv2 support is added, send
		 * max supported values
		 */
		cm_event.ird = cm_event.ord = 128;

		if (cm_id->event_handler)
			cm_id->event_handler(cm_id, &cm_event);
		break;
	}

	case C2_RES_IND_CQ:{
		struct c2_cq *cq =
		    resource_user_context;

		pr_debug("IB_EVENT_CQ_ERR\n");
		ib_event.device = &c2dev->ibdev;
		ib_event.element.cq = &cq->ibcq;
		ib_event.event = IB_EVENT_CQ_ERR;

		if (cq->ibcq.event_handler)
			cq->ibcq.event_handler(&ib_event,
					       cq->ibcq.cq_context);
		break;
	}

	default:
		printk("Bad resource indicator = %d\n",
		       resource_indicator);
		break;
	}

 ignore_it:
	c2_mq_free(mq);
}
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _C2_AE_H_
#define _C2_AE_H_
/*
* WARNING: If you change this file, also bump C2_IVN_BASE
* in common/include/clustercore/c2_ivn.h.
*/
/*
* Asynchronous Event Identifiers
*
* These start at 0x80 only so it's obvious from inspection that
* they are not work-request statuses. This isn't critical.
*
* NOTE: these event id's must fit in eight bits.
*/
enum c2_event_id {
	/* Connection management events */
	CCAE_REMOTE_SHUTDOWN = 0x80,
	CCAE_ACTIVE_CONNECT_RESULTS,
	CCAE_CONNECTION_REQUEST,
	CCAE_LLP_CLOSE_COMPLETE,
	CCAE_TERMINATE_MESSAGE_RECEIVED,
	/* Lower-layer-protocol (LLP) errors */
	CCAE_LLP_CONNECTION_RESET,
	CCAE_LLP_CONNECTION_LOST,
	CCAE_LLP_SEGMENT_SIZE_INVALID,
	CCAE_LLP_INVALID_CRC,
	CCAE_LLP_BAD_FPDU,
	/* DDP/RDMA protocol violations */
	CCAE_INVALID_DDP_VERSION,
	CCAE_INVALID_RDMA_VERSION,
	CCAE_UNEXPECTED_OPCODE,
	CCAE_INVALID_DDP_QUEUE_NUMBER,
	CCAE_RDMA_READ_NOT_ENABLED,
	CCAE_RDMA_WRITE_NOT_ENABLED,
	CCAE_RDMA_READ_TOO_SMALL,
	CCAE_NO_L_BIT,
	/* Tagged-buffer (STag) access errors */
	CCAE_TAGGED_INVALID_STAG,
	CCAE_TAGGED_BASE_BOUNDS_VIOLATION,
	CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION,
	CCAE_TAGGED_INVALID_PD,
	CCAE_WRAP_ERROR,
	/* Close / MSN sequencing errors */
	CCAE_BAD_CLOSE,
	CCAE_BAD_LLP_CLOSE,
	CCAE_INVALID_MSN_RANGE,
	CCAE_INVALID_MSN_GAP,
	/* Inbound RDMA-read request queue (IRRQ) errors */
	CCAE_IRRQ_OVERFLOW,
	CCAE_IRRQ_MSN_GAP,
	CCAE_IRRQ_MSN_RANGE,
	CCAE_IRRQ_INVALID_STAG,
	CCAE_IRRQ_BASE_BOUNDS_VIOLATION,
	CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION,
	CCAE_IRRQ_INVALID_PD,
	CCAE_IRRQ_WRAP_ERROR,
	/* CQ / QP / SRQ / RNIC error and limit events */
	CCAE_CQ_SQ_COMPLETION_OVERFLOW,
	CCAE_CQ_RQ_COMPLETION_ERROR,
	CCAE_QP_SRQ_WQE_ERROR,
	CCAE_QP_LOCAL_CATASTROPHIC_ERROR,
	CCAE_CQ_OVERFLOW,
	CCAE_CQ_OPERATION_ERROR,
	CCAE_SRQ_LIMIT_REACHED,
	CCAE_QP_RQ_LIMIT_REACHED,
	CCAE_SRQ_CATASTROPHIC_ERROR,
	CCAE_RNIC_CATASTROPHIC_ERROR
/* WARNING If you add more id's, make sure their values fit in eight bits. */
};
/*
* Resource Indicators and Identifiers
*/
/* Identifies which object type an AE's user_context pointer refers to. */
enum c2_resource_indicator {
	C2_RES_IND_QP = 1,	/* queue pair (user_context is a struct c2_qp) */
	C2_RES_IND_EP,		/* endpoint (user_context is an iw_cm_id) */
	C2_RES_IND_CQ,		/* completion queue (user_context is a struct c2_cq) */
	C2_RES_IND_SRQ,		/* shared receive queue (no handler in c2_ae_event) */
};
#endif /* _C2_AE_H_ */
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/stddef.h>

#include "c2.h"
/*
 * Allocate one page-sized, DMA-coherent chunk of shared-pointer slots
 * and thread its embedded free list.  On success the new chunk is
 * stored in *head and 0 is returned; otherwise -ENOMEM.
 */
static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
			       struct sp_chunk **head)
{
	struct sp_chunk *chunk;
	dma_addr_t dma_addr;
	int slot, nr_slots;

	chunk = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
				   &dma_addr, gfp_mask);
	if (chunk == NULL)
		return -ENOMEM;

	chunk->dma_addr = dma_addr;
	dma_unmap_addr_set(chunk, mapping, chunk->dma_addr);
	chunk->next = NULL;
	chunk->head = 0;

	/* Each free slot stores the index of the next free slot. */
	nr_slots = (PAGE_SIZE - sizeof(struct sp_chunk) -
		    sizeof(u16)) / sizeof(u16) - 1;
	for (slot = 0; slot < nr_slots; slot++)
		chunk->shared_ptr[slot] = slot + 1;

	/* 0xFFFF terminates the free list. */
	chunk->shared_ptr[slot] = 0xFFFF;

	*head = chunk;
	return 0;
}
/*
 * Create the shared-pointer pool by allocating its first chunk; further
 * chunks are appended on demand by c2_alloc_mqsp().  Returns 0 or -ENOMEM.
 */
int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
		      struct sp_chunk **root)
{
	return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
}
void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
{
struct sp_chunk *next;
while (root) {
next = root->next;
dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
dma_unmap_addr(root, mapping));
root = next;
}
}
/*
 * Allocate one shared-pointer slot from the pool, growing the pool by a
 * new chunk when every existing chunk is full.
 *
 * Returns the slot's CPU pointer and stores its bus address in
 * *dma_addr; returns NULL if a new chunk could not be allocated.
 */
__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
		      dma_addr_t *dma_addr, gfp_t gfp_mask)
{
	u16 mqsp;

	/* Walk the chunk list looking for a chunk with a free slot. */
	while (head) {
		mqsp = head->head;
		if (mqsp != 0xFFFF) {
			/* Pop the slot off this chunk's free list. */
			head->head = head->shared_ptr[mqsp];
			break;
		} else if (head->next == NULL) {
			/* Last chunk is full: append a fresh chunk and
			 * take its first slot. */
			if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
			    0) {
				head = head->next;
				mqsp = head->head;
				head->head = head->shared_ptr[mqsp];
				break;
			} else
				return NULL;
		} else
			head = head->next;
	}
	if (head) {
		/* Bus address = chunk's bus address + the slot's byte
		 * offset within the chunk. */
		*dma_addr = head->dma_addr +
			    ((unsigned long) &(head->shared_ptr[mqsp]) -
			     (unsigned long) head);
		pr_debug("%s addr %p dma_addr %llx\n", __func__,
			 &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
		return (__force __be16 *) &(head->shared_ptr[mqsp]);
	}
	return NULL;
}
/*
 * Return a shared-pointer slot to its owning pool chunk's free list.
 *
 * @mqsp: slot previously handed out by c2_alloc_mqsp().  Chunks are
 *        page-sized and page-aligned, so masking the slot's address
 *        with PAGE_MASK recovers the enclosing struct sp_chunk.
 */
void c2_free_mqsp(__be16 *mqsp)
{
	struct sp_chunk *head;
	u16 idx;

	/* The chunk containing this ptr begins at the page boundary */
	head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);

	/* Link head to new mqsp */
	*mqsp = (__force __be16) head->head;

	/* Compute the shared_ptr index: byte offset within the page,
	 * minus the header, in u16 units.  Use offsetof() rather than
	 * the null-pointer-arithmetic idiom, which is undefined
	 * behavior in C. */
	idx = (offset_in_page(mqsp)) >> 1;
	idx -= offsetof(struct sp_chunk, shared_ptr) >> 1;

	/* Point this index at the head */
	head->shared_ptr[idx] = head->head;

	/* Point head at this index */
	head->head = idx;
}
This diff is collapsed.
This diff is collapsed.
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "c2.h"
#include <rdma/iw_cm.h>
#include "c2_vq.h"
static void handle_mq(struct c2_dev *c2dev, u32 index);
static void handle_vq(struct c2_dev *c2dev, u32 mq_index);
/*
* Handle RNIC interrupts
*/
/*
 * Drain the adapter's hint queue: while hints are outstanding, read the
 * next MQ index from BAR0 and dispatch it.  A value with the top bit
 * set means the hint FIFO is empty, so stop early.
 */
void c2_rnic_interrupt(struct c2_dev *c2dev)
{
	unsigned int mq_index;

	for (;;) {
		if (c2dev->hints_read == be16_to_cpu(*c2dev->hint_count))
			break;
		mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);
		if (mq_index & 0x80000000)
			break;
		c2dev->hints_read++;
		handle_mq(c2dev, mq_index);
	}
}
/*
 * Top level MQ handler: route activity on a message queue to the right
 * consumer based on its index.
 *
 *   index 0    - request VQ has slots free; wake blocked senders
 *   index 1    - verbs reply VQ has messages
 *   index 2    - asynchronous event MQ (reply VQ is drained first)
 *   index >= 3 - completion queue event
 */
static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
{
	if (c2dev->qptr_array[mq_index] == NULL) {
		pr_debug("handle_mq: stray activity for mq_index=%d\n",
			 mq_index);
		return;
	}

	if (mq_index == 0) {
		/* The request VQ now has message slots available again;
		 * wake up anyone waiting to post a verbs request. */
		wake_up(&c2dev->req_vq_wo);
	} else if (mq_index == 1) {
		handle_vq(c2dev, mq_index);
	} else if (mq_index == 2) {
		/* Purge the reply VQ first in case there are pending
		 * accept-reply requests that would generate an
		 * ESTABLISHED event; otherwise a CLOSE event could be
		 * delivered before the ESTABLISHED event. */
		handle_vq(c2dev, 1);
		c2_ae_event(c2dev, mq_index);
	} else {
		/* There is no event synchronization between CQ events
		 * and AE or CM events: CQEs for all the I/O up to and
		 * including the flush of a peer disconnect may be
		 * delivered before the ESTABLISHED event reaches the
		 * application. */
		c2_cq_event(c2dev, mq_index);
	}
}
/*
 * Handles verbs WR replies.
 *
 * Consumes one reply message from the reply VQ, copies it into a host
 * reply buffer (or, if none is available, just the header into a local
 * so we can still demux), optionally delivers a CM event for
 * ESTABLISHED/CLOSE requests, then hands the reply to the waiter and
 * wakes it.  Ownership of reply_msg passes to the waiter; if the
 * request was cancelled, vq_req_put() frees both req and reply_msg.
 */
static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
{
	void *adapter_msg, *reply_msg;
	struct c2wr_hdr *host_msg;
	struct c2wr_hdr tmp;
	struct c2_mq *reply_vq;
	struct c2_vq_req *req;
	struct iw_cm_event cm_event;
	int err;

	reply_vq = c2dev->qptr_array[mq_index];

	/*
	 * get next msg from mq_index into adapter_msg.
	 * don't free it yet.
	 */
	adapter_msg = c2_mq_consume(reply_vq);
	if (adapter_msg == NULL) {
		return;
	}

	host_msg = vq_repbuf_alloc(c2dev);

	/*
	 * If we can't get a host buffer, then we'll still
	 * wakeup the waiter, we just won't give him the msg.
	 * It is assumed the waiter will deal with this...
	 */
	if (!host_msg) {
		pr_debug("handle_vq: no repbufs!\n");

		/*
		 * just copy the WR header into a local variable.
		 * this allows us to still demux on the context
		 */
		host_msg = &tmp;
		memcpy(host_msg, adapter_msg, sizeof(tmp));
		reply_msg = NULL;
	} else {
		memcpy(host_msg, adapter_msg, reply_vq->msg_size);
		reply_msg = host_msg;
	}

	/*
	 * consume the msg from the MQ
	 */
	c2_mq_free(reply_vq);

	/*
	 * wakeup the waiter.
	 */
	req = (struct c2_vq_req *) (unsigned long) host_msg->context;
	if (req == NULL) {
		/*
		 * We should never get here, as the adapter should
		 * never send us a reply that we're not expecting.
		 */
		if (reply_msg != NULL)
			vq_repbuf_free(c2dev, host_msg);
		pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
		return;
	}

	/* No host buffer means we could not read the result code. */
	if (reply_msg)
		err = c2_errno(reply_msg);
	else
		err = -ENOMEM;

	if (!err) switch (req->event) {
	case IW_CM_EVENT_ESTABLISHED:
		c2_set_qp_state(req->qp,
				C2_QP_STATE_RTS);
		/*
		 * Until ird/ord negotiation via MPAv2 support is added, send
		 * max supported values
		 */
		cm_event.ird = cm_event.ord = 128;
		/* fall through - ESTABLISHED is delivered via the same
		 * event-construction code as CLOSE below */
	case IW_CM_EVENT_CLOSE:

		/*
		 * Move the QP to RTS if this is
		 * the established event
		 */
		cm_event.event = req->event;
		cm_event.status = 0;
		cm_event.local_addr = req->cm_id->local_addr;
		cm_event.remote_addr = req->cm_id->remote_addr;
		cm_event.private_data = NULL;
		cm_event.private_data_len = 0;
		req->cm_id->event_handler(req->cm_id, &cm_event);
		break;
	default:
		break;
	}

	/* Publish the reply and wake the blocked requester. */
	req->reply_msg = (u64) (unsigned long) (reply_msg);
	atomic_set(&req->reply_ready, 1);
	wake_up(&req->wait_object);

	/*
	 * If the request was cancelled, then this put will
	 * free the vq_req memory...and reply_msg!!!
	 */
	vq_req_put(c2dev, req);
}
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include "c2.h"
#include "c2_vq.h"
#define PBL_VIRT 1
#define PBL_PHYS 2
/*
* Send all the PBL messages to convey the remainder of the PBL
* Wait for the adapter's reply on the last one.
* This is indicated by setting the MEM_PBL_COMPLETE in the flags.
*
* NOTE: vq_req is _not_ freed by this function. The VQ Host
* Reply buffer _is_ freed by this function.
*/
static int
send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
		  unsigned long va, u32 pbl_depth,
		  struct c2_vq_req *vq_req, int pbl_type)
{
	u32 pbe_count;		/* amt that fits in a PBL msg */
	u32 count;		/* amt in this PBL MSG. */
	struct c2wr_nsmr_pbl_req *wr;	/* PBL WR ptr */
	struct c2wr_nsmr_pbl_rep *reply;	/* reply ptr */
	int err, pbl_virt, pbl_index, i;

	/* Decode the PBL flavor; anything else is a caller bug. */
	switch (pbl_type) {
	case PBL_VIRT:
		pbl_virt = 1;
		break;
	case PBL_PHYS:
		pbl_virt = 0;
		break;
	default:
		return -EINVAL;
		break;
	}

	/* How many page-buffer entries fit in one request message. */
	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		return -ENOMEM;
	}
	c2_wr_set_id(wr, CCWR_NSMR_PBL);

	/*
	 * Only the last PBL message will generate a reply from the verbs,
	 * so we set the context to 0 indicating there is no kernel verbs
	 * handler blocked awaiting this reply.
	 */
	wr->hdr.context = 0;
	wr->rnic_handle = c2dev->adapter_handle;
	wr->stag_index = stag_index;	/* already swapped */
	wr->flags = 0;
	pbl_index = 0;
	while (pbl_depth) {
		count = min(pbe_count, pbl_depth);
		wr->addrs_length = cpu_to_be32(count);

		/*
		 * If this is the last message, then reference the
		 * vq request struct cuz we're gonna wait for a reply.
		 * also make this PBL msg as the last one.
		 */
		if (count == pbl_depth) {
			/*
			 * reference the request struct.  dereferenced in the
			 * int handler.
			 */
			vq_req_get(c2dev, vq_req);
			wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);

			/*
			 * This is the last PBL message.
			 * Set the context to our VQ Request Object so we can
			 * wait for the reply.
			 */
			wr->hdr.context = (unsigned long) vq_req;
		}

		/*
		 * If pbl_virt is set then va is a virtual address
		 * that describes a virtually contiguous memory
		 * allocation. The wr needs the start of each virtual page
		 * to be converted to the corresponding physical address
		 * of the page. If pbl_virt is not set then va is an array
		 * of physical addresses and there is no conversion to do.
		 * Just fill in the wr with what is in the array.
		 *
		 * NOTE(review): the PBL_VIRT branch only advances va and
		 * never fills wr->paddrs[] — it appears unused/unfinished
		 * in this deprecated driver; confirm before relying on it.
		 */
		for (i = 0; i < count; i++) {
			if (pbl_virt) {
				va += PAGE_SIZE;
			} else {
				wr->paddrs[i] =
				    cpu_to_be64(((u64 *)va)[pbl_index + i]);
			}
		}

		/*
		 * Send WR to adapter
		 */
		err = vq_send_wr(c2dev, (union c2wr *) wr);
		if (err) {
			/* NOTE(review): count <= pbe_count is always true
			 * since count = min(pbe_count, pbl_depth), so the
			 * reference is dropped on every send failure. */
			if (count <= pbe_count) {
				vq_req_put(c2dev, vq_req);
			}
			goto bail0;
		}
		pbl_depth -= count;
		pbl_index += count;
	}

	/*
	 * Now wait for the reply...
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail0;
	}

	/*
	 * Process reply
	 */
	reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);

	vq_repbuf_free(c2dev, reply);
bail0:
	kfree(wr);
	return err;
}
#define C2_PBL_MAX_DEPTH 131072
/*
 * Register a physical memory region with the adapter by posting a
 * CCWR_NSMR_REGISTER work request.  As many page-buffer entries (PBEs)
 * as fit are carried in the initial message; any remainder is streamed
 * to the adapter via send_pbl_messages().
 *
 * On success the new STag is stored in mr->ibmr.lkey / mr->ibmr.rkey.
 * Returns 0 or a negative errno.
 */
int
c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
			   int page_size, int pbl_depth, u32 length,
			   u32 offset, u64 *va, enum c2_acf acf,
			   struct c2_mr *mr)
{
	struct c2_vq_req *vq_req;
	struct c2wr_nsmr_register_req *wr;
	struct c2wr_nsmr_register_rep *reply;
	u16 flags;
	int i, pbe_count, count;
	int err;

	/* NOTE(review): -EINTR is an odd errno for argument validation;
	 * kept as-is since callers may depend on the exact value. */
	if (!va || !length || !addr_list || !pbl_depth)
		return -EINTR;

	/*
	 * Verify PBL depth is within rnic max
	 */
	if (pbl_depth > C2_PBL_MAX_DEPTH) {
		return -EINTR;
	}

	/*
	 * allocate verbs request object
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail0;
	}

	/*
	 * build the WR
	 */
	c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
	wr->hdr.context = (unsigned long) vq_req;
	wr->rnic_handle = c2dev->adapter_handle;

	flags = (acf | MEM_VA_BASED | MEM_REMOTE);

	/*
	 * compute how many pbes can fit in the message
	 */
	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);

	/* If the whole PBL fits in this single message, mark it complete. */
	if (pbl_depth <= pbe_count) {
		flags |= MEM_PBL_COMPLETE;
	}
	wr->flags = cpu_to_be16(flags);
	wr->stag_key = 0;	/* stag_key */
	wr->va = cpu_to_be64(*va);
	wr->pd_id = mr->pd->pd_id;
	wr->pbe_size = cpu_to_be32(page_size);
	wr->length = cpu_to_be32(length);
	wr->pbl_depth = cpu_to_be32(pbl_depth);
	wr->fbo = cpu_to_be32(offset);
	count = min(pbl_depth, pbe_count);
	wr->addrs_length = cpu_to_be32(count);

	/*
	 * fill out the PBL for this message
	 */
	for (i = 0; i < count; i++) {
		wr->paddrs[i] = cpu_to_be64(addr_list[i]);
	}

	/*
	 * reference the request struct; dereferenced in the int handler
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * send the WR to the adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	/*
	 * wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail1;
	}

	/*
	 * process reply
	 */
	reply =
	    (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail1;
	}
	if ((err = c2_errno(reply))) {
		goto bail2;
	}
	//*p_pb_entries = be32_to_cpu(reply->pbl_depth);
	mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
	vq_repbuf_free(c2dev, reply);

	/*
	 * if there are still more PBEs we need to send them to
	 * the adapter and wait for a reply on the final one.
	 * reuse vq_req for this purpose.
	 * (After the fill loop above, i == count, so &addr_list[i] is
	 * the first PBE that has not yet been sent.)
	 */
	pbl_depth -= count;
	if (pbl_depth) {
		vq_req->reply_msg = (unsigned long) NULL;
		atomic_set(&vq_req->reply_ready, 0);
		err = send_pbl_messages(c2dev,
					cpu_to_be32(mr->ibmr.lkey),
					(unsigned long) &addr_list[i],
					pbl_depth, vq_req, PBL_PHYS);
		if (err) {
			goto bail1;
		}
	}

	vq_req_free(c2dev, vq_req);
	kfree(wr);

	return err;

bail2:
	vq_repbuf_free(c2dev, reply);
bail1:
	kfree(wr);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
/*
 * Release a STag index on the adapter by posting a CCWR_STAG_DEALLOC
 * work request and waiting for the adapter's reply.
 * Returns 0 or a negative errno.
 */
int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
{
	struct c2wr_stag_dealloc_req wr;
	struct c2wr_stag_dealloc_rep *reply;
	struct c2_vq_req *vq_req;
	int err;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	/* Build the dealloc work request on the stack. */
	c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
	wr.hdr.context = (u64) (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.stag_index = cpu_to_be32(stag_index);

	/* Hold a reference for the interrupt handler. */
	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto out;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto out;

	reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto out;
	}

	err = c2_errno(reply);
	vq_repbuf_free(c2dev, reply);
out:
	vq_req_free(c2dev, vq_req);
	return err;
}
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "c2.h"
#include "c2_mq.h"
/*
 * Return a pointer to the next free message slot in an adapter-target
 * MQ, or NULL when the queue is full.  Does not advance the queue.
 */
void *c2_mq_alloc(struct c2_mq *q)
{
	BUG_ON(q->magic != C2_MQ_MAGIC);
	BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);

	if (c2_mq_full(q))
		return NULL;

#ifdef DEBUG
	{
		struct c2wr_hdr *m = (struct c2wr_hdr *)
			(q->msg_pool.host + q->priv * q->msg_size);
#ifdef CCMSGMAGIC
		/* Slot must carry the "free" magic before being handed out. */
		BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC));
		m->magic = cpu_to_be32(CCWR_MAGIC);
#endif
		return m;
	}
#else
	return q->msg_pool.host + q->priv * q->msg_size;
#endif
}
/*
 * Commit the slot previously handed out by c2_mq_alloc(): advance the
 * local offset and publish it to the adapter.  No-op on a full queue.
 */
void c2_mq_produce(struct c2_mq *q)
{
	BUG_ON(q->magic != C2_MQ_MAGIC);
	BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);

	if (c2_mq_full(q))
		return;

	q->priv = (q->priv + 1) % q->q_size;
	q->hint_count++;
	/* Publish the new producer offset to the peer. */
	__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
}
/*
 * Return a pointer to the next unconsumed message in a host-target MQ,
 * or NULL when the queue is empty.  Does not advance the queue.
 */
void *c2_mq_consume(struct c2_mq *q)
{
	BUG_ON(q->magic != C2_MQ_MAGIC);
	BUG_ON(q->type != C2_MQ_HOST_TARGET);

	if (c2_mq_empty(q))
		return NULL;

#ifdef DEBUG
	{
		struct c2wr_hdr *m = (struct c2wr_hdr *)
			(q->msg_pool.host + q->priv * q->msg_size);
#ifdef CCMSGMAGIC
		/* A live message must carry the valid magic. */
		BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC));
#endif
		return m;
	}
#else
	return q->msg_pool.host + q->priv * q->msg_size;
#endif
}
/*
 * Retire the message returned by c2_mq_consume(): advance the local
 * offset and publish it to the adapter.  No-op on an empty queue.
 */
void c2_mq_free(struct c2_mq *q)
{
	BUG_ON(q->magic != C2_MQ_MAGIC);
	BUG_ON(q->type != C2_MQ_HOST_TARGET);

	if (c2_mq_empty(q))
		return;

#ifdef CCMSGMAGIC
	{
		/* Stamp the slot with the "free" magic before recycling it. */
		struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *)
			(q->msg_pool.adapter + q->priv * q->msg_size);
		__raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic);
	}
#endif
	q->priv = (q->priv + 1) % q->q_size;
	/* Publish the new consumer offset to the peer. */
	__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
}
/*
 * Locally consume @wqe_count entries from an adapter-target MQ by
 * advancing the shared offset one slot at a time (with wraparound).
 */
void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count)
{
	u32 i;

	BUG_ON(q->magic != C2_MQ_MAGIC);
	BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);

	for (i = 0; i < wqe_count; i++) {
		BUG_ON(c2_mq_empty(q));
		*q->shared = cpu_to_be16((be16_to_cpu(*q->shared) + 1) % q->q_size);
	}
}
#if 0
/*
 * Number of messages currently in @q.  Dead code: compiled out with
 * "#if 0"; kept for reference only.
 */
u32 c2_mq_count(struct c2_mq *q)
{
	s32 count;

	/* Producer/consumer roles swap with the queue type. */
	if (q->type == C2_MQ_HOST_TARGET)
		count = be16_to_cpu(*q->shared) - q->priv;
	else
		count = q->priv - be16_to_cpu(*q->shared);

	/* The offsets wrap, so a negative delta means one lap apart. */
	if (count < 0)
		count += q->q_size;

	return (u32) count;
}
#endif	/* 0 */
/*
 * Initialize a request MQ whose message pool lives in adapter memory.
 * q->shared must already point at the peer-updated offset word.
 */
void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
		    u8 __iomem *pool_start, u16 __iomem *peer, u32 type)
{
	BUG_ON(!q->shared);

	/* Caller has already byte-swapped the shared values. */
	q->magic = C2_MQ_MAGIC;
	q->type = type;
	q->index = index;
	q->q_size = q_size;
	q->msg_size = msg_size;
	q->msg_pool.adapter = pool_start;
	q->peer = (struct c2_mq_shared __iomem *) peer;
	q->priv = 0;
	q->hint_count = 0;
}
/*
 * Initialize a reply MQ whose message pool lives in host memory.
 * q->shared must already point at the peer-updated offset word.
 */
void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
		    u8 *pool_start, u16 __iomem *peer, u32 type)
{
	BUG_ON(!q->shared);

	/* Caller has already byte-swapped the shared values. */
	q->magic = C2_MQ_MAGIC;
	q->type = type;
	q->index = index;
	q->q_size = q_size;
	q->msg_size = msg_size;
	q->msg_pool.host = pool_start;
	q->peer = (struct c2_mq_shared __iomem *) peer;
	q->priv = 0;
	q->hint_count = 0;
}
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _C2_MQ_H_
#define _C2_MQ_H_
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include "c2_wr.h"
/* Byte offsets of the MQ doorbell registers within the shared page. */
enum c2_shared_regs {
	C2_SHARED_ARMED = 0x10,
	C2_SHARED_NOTIFY = 0x18,
	C2_SHARED_SHARED = 0x40,
};

/*
 * Peer-visible MQ state.  The host writes the new queue offset into
 * 'shared' via __raw_writew() in c2_mq_produce()/c2_mq_free().
 */
struct c2_mq_shared {
	u16 unused1;
	u8 armed;		/* NOTE(review): presumably the notify-arm flag -- confirm */
	u8 notification_type;
	u32 unused2;
	u16 shared;		/* queue offset as published by the host */
	/* Pad to 64 bytes. */
	u8 pad[64 - sizeof(u16) - 2 * sizeof(u8) - sizeof(u32) - sizeof(u16)];
};

/* Who consumes from the queue: the host CPU or the adapter. */
enum c2_mq_type {
	C2_MQ_HOST_TARGET = 1,
	C2_MQ_ADAPTER_TARGET = 2,
};

/*
 * c2_mq_t is for kernel-mode MQs like the VQs and the AEQ.
 * c2_user_mq_t (which is the same format) is for user-mode MQs...
 */
#define C2_MQ_MAGIC 0x4d512020	/* 'MQ  ' */
struct c2_mq {
	u32 magic;		/* C2_MQ_MAGIC while the queue is valid */
	union {
		u8 *host;		/* pool in host memory (host-target MQs) */
		u8 __iomem *adapter;	/* pool in adapter memory (adapter-target MQs) */
	} msg_pool;
	dma_addr_t host_dma;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	u16 hint_count;		/* messages produced; bumped in c2_mq_produce() */
	u16 priv;		/* our local (private) queue offset */
	struct c2_mq_shared __iomem *peer;	/* peer's doorbell/shared state */
	__be16 *shared;		/* offset published by the peer, big-endian */
	dma_addr_t shared_dma;
	u32 q_size;		/* number of message slots */
	u32 msg_size;		/* bytes per message slot */
	u32 index;
	enum c2_mq_type type;
};
static __inline__ int c2_mq_empty(struct c2_mq *q)
{
return q->priv == be16_to_cpu(*q->shared);
}
static __inline__ int c2_mq_full(struct c2_mq *q)
{
return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size;
}
void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
void *c2_mq_alloc(struct c2_mq *q);
void c2_mq_produce(struct c2_mq *q);
void *c2_mq_consume(struct c2_mq *q);
void c2_mq_free(struct c2_mq *q);
void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
u8 __iomem *pool_start, u16 __iomem *peer, u32 type);
void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
u8 *pool_start, u16 __iomem *peer, u32 type);
#endif /* _C2_MQ_H_ */
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include "c2.h"
#include "c2_provider.h"
/*
 * Allocate a PD id from the bitmap using a rotating next-fit search.
 * On success pd->pd_id is set and 0 is returned; -ENOMEM if the
 * table is exhausted.  (The 'privileged' argument is unused.)
 */
int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd)
{
	int ret = 0;
	u32 pdn;

	spin_lock(&c2dev->pd_table.lock);

	/* Search from the last allocation point, wrapping around once. */
	pdn = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max,
				 c2dev->pd_table.last);
	if (pdn >= c2dev->pd_table.max)
		pdn = find_first_zero_bit(c2dev->pd_table.table,
					  c2dev->pd_table.max);

	if (pdn >= c2dev->pd_table.max) {
		ret = -ENOMEM;
	} else {
		pd->pd_id = pdn;
		__set_bit(pdn, c2dev->pd_table.table);
		/* Remember where to resume the next search. */
		c2dev->pd_table.last = pdn + 1;
		if (c2dev->pd_table.last >= c2dev->pd_table.max)
			c2dev->pd_table.last = 0;
	}

	spin_unlock(&c2dev->pd_table.lock);
	return ret;
}
/*
 * Return a PD id to the allocation bitmap.  The table lock serializes
 * against concurrent c2_pd_alloc() callers.
 */
void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
{
	spin_lock(&c2dev->pd_table.lock);
	__clear_bit(pd->pd_id, c2dev->pd_table.table);
	spin_unlock(&c2dev->pd_table.lock);
}
/*
 * Allocate and zero the PD allocation bitmap, sized from the adapter's
 * max_pd property.  Returns 0 on success or -ENOMEM.
 */
int c2_init_pd_table(struct c2_dev *c2dev)
{
	c2dev->pd_table.last = 0;
	c2dev->pd_table.max = c2dev->props.max_pd;
	spin_lock_init(&c2dev->pd_table.lock);
	/*
	 * kcalloc() zeroes the allocation and checks the size
	 * multiplication for overflow, replacing the open-coded
	 * kmalloc() + bitmap_zero() pair (and zeroing the trailing
	 * partial word that bitmap_zero() left uninitialized).
	 */
	c2dev->pd_table.table = kcalloc(BITS_TO_LONGS(c2dev->props.max_pd),
					sizeof(long), GFP_KERNEL);
	if (!c2dev->pd_table.table)
		return -ENOMEM;
	return 0;
}
/* Free the PD allocation bitmap created by c2_init_pd_table(). */
void c2_cleanup_pd_table(struct c2_dev *c2dev)
{
	kfree(c2dev->pd_table.table);
}
This diff is collapsed.
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef C2_PROVIDER_H
#define C2_PROVIDER_H
#include <linux/inetdevice.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include "c2_mq.h"
#include <rdma/iw_cm.h>
/* Memory protection table access flags (bit positions in the MPT entry). */
#define C2_MPT_FLAG_ATOMIC        (1 << 14)
#define C2_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define C2_MPT_FLAG_REMOTE_READ   (1 << 12)
#define C2_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define C2_MPT_FLAG_LOCAL_READ    (1 << 10)

/* A DMA-mapped host buffer. */
struct c2_buf_list {
	void *buf;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

/* The user context keeps track of objects allocated for a
 * particular user-mode client. */
struct c2_ucontext {
	struct ib_ucontext ibucontext;
};

struct c2_mtt;

/* All objects associated with a PD are kept in the
 * associated user context if present.
 */
struct c2_pd {
	struct ib_pd ibpd;
	u32 pd_id;		/* index allocated from the PD bitmap */
};

/* Memory region: the ib_mr plus its owning PD and user pages. */
struct c2_mr {
	struct ib_mr ibmr;
	struct c2_pd *pd;
	struct ib_umem *umem;	/* NULL for kernel (phys) registrations */
};

struct c2_av;

/* Where an address handle's AV is stored. */
enum c2_ah_type {
	C2_AH_ON_HCA,
	C2_AH_PCI_POOL,
	C2_AH_KMALLOC
};

struct c2_ah {
	struct ib_ah ibah;
};

/* Completion queue backed by a host-target message queue. */
struct c2_cq {
	struct ib_cq ibcq;
	spinlock_t lock;
	atomic_t refcount;
	int cqn;
	int is_kernel;		/* nonzero for kernel-owned CQs */
	wait_queue_head_t wait;
	u32 adapter_handle;
	struct c2_mq mq;	/* completion message queue */
};

struct c2_wq {
	spinlock_t lock;
};

struct iw_cm_id;

/* Queue pair with its send/receive message queues. */
struct c2_qp {
	struct ib_qp ibqp;
	struct iw_cm_id *cm_id;
	spinlock_t lock;
	atomic_t refcount;
	wait_queue_head_t wait;
	int qpn;
	u32 adapter_handle;
	u32 send_sgl_depth;
	u32 recv_sgl_depth;
	u32 rdma_write_sgl_depth;
	u8 state;
	struct c2_mq sq_mq;	/* send queue */
	struct c2_mq rq_mq;	/* receive queue */
};

/* Connection-request endpoint attributes (addresses in network order --
 * NOTE(review): presumed; confirm against the CM code). */
struct c2_cr_query_attrs {
	u32 local_addr;
	u32 remote_addr;
	u16 local_port;
	u16 remote_port;
};
/* container_of() helpers: map generic ib_* objects to their c2 wrappers. */
static inline struct c2_pd *to_c2pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct c2_pd, ibpd);
}

static inline struct c2_ucontext *to_c2ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct c2_ucontext, ibucontext);
}

static inline struct c2_mr *to_c2mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct c2_mr, ibmr);
}

static inline struct c2_ah *to_c2ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct c2_ah, ibah);
}

static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct c2_cq, ibcq);
}

static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct c2_qp, ibqp);
}
/*
 * Return 1 if @addr is one of the IPv4 addresses configured on
 * @netdev, 0 otherwise (including when the device has no inet data).
 */
static inline int is_rnic_addr(struct net_device *netdev, u32 addr)
{
	struct in_device *ind;
	int ret = 0;

	ind = in_dev_get(netdev);	/* takes a reference */
	if (!ind)
		return 0;

	/* Walk every ifaddr entry on the device. */
	for_ifa(ind) {
		if (ifa->ifa_address == addr) {
			ret = 1;
			break;
		}
	}
	endfor_ifa(ind);

	in_dev_put(ind);	/* drop the in_dev_get() reference */
	return ret;
}
#endif /* C2_PROVIDER_H */
This diff is collapsed.
This diff is collapsed.
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _C2_STATUS_H_
#define _C2_STATUS_H_
/*
* Verbs Status Codes
*/
enum c2_status {
	C2_OK = 0,		/* This must be zero */
	CCERR_INSUFFICIENT_RESOURCES = 1,
	CCERR_INVALID_MODIFIER = 2,
	CCERR_INVALID_MODE = 3,
	CCERR_IN_USE = 4,
	CCERR_INVALID_RNIC = 5,
	CCERR_INTERRUPTED_OPERATION = 6,
	CCERR_INVALID_EH = 7,
	CCERR_INVALID_CQ = 8,
	CCERR_CQ_EMPTY = 9,
	CCERR_NOT_IMPLEMENTED = 10,
	CCERR_CQ_DEPTH_TOO_SMALL = 11,
	CCERR_PD_IN_USE = 12,
	CCERR_INVALID_PD = 13,
	CCERR_INVALID_SRQ = 14,
	CCERR_INVALID_ADDRESS = 15,
	CCERR_INVALID_NETMASK = 16,
	CCERR_INVALID_QP = 17,
	CCERR_INVALID_QP_STATE = 18,
	CCERR_TOO_MANY_WRS_POSTED = 19,
	CCERR_INVALID_WR_TYPE = 20,
	CCERR_INVALID_SGL_LENGTH = 21,
	CCERR_INVALID_SQ_DEPTH = 22,
	CCERR_INVALID_RQ_DEPTH = 23,
	CCERR_INVALID_ORD = 24,
	CCERR_INVALID_IRD = 25,
	CCERR_QP_ATTR_CANNOT_CHANGE = 26,
	CCERR_INVALID_STAG = 27,
	CCERR_QP_IN_USE = 28,
	CCERR_OUTSTANDING_WRS = 29,
	CCERR_STAG_IN_USE = 30,
	CCERR_INVALID_STAG_INDEX = 31,
	CCERR_INVALID_SGL_FORMAT = 32,
	CCERR_ADAPTER_TIMEOUT = 33,
	CCERR_INVALID_CQ_DEPTH = 34,
	CCERR_INVALID_PRIVATE_DATA_LENGTH = 35,
	CCERR_INVALID_EP = 36,
	/* Value 37 is intentionally unused; MR_IN_USE aliases STAG_IN_USE. */
	CCERR_MR_IN_USE = CCERR_STAG_IN_USE,
	CCERR_FLUSHED = 38,
	CCERR_INVALID_WQE = 39,
	CCERR_LOCAL_QP_CATASTROPHIC_ERROR = 40,
	CCERR_REMOTE_TERMINATION_ERROR = 41,
	CCERR_BASE_AND_BOUNDS_VIOLATION = 42,
	CCERR_ACCESS_VIOLATION = 43,
	CCERR_INVALID_PD_ID = 44,
	CCERR_WRAP_ERROR = 45,
	CCERR_INV_STAG_ACCESS_ERROR = 46,
	CCERR_ZERO_RDMA_READ_RESOURCES = 47,
	CCERR_QP_NOT_PRIVILEGED = 48,
	CCERR_STAG_STATE_NOT_INVALID = 49,
	CCERR_INVALID_PAGE_SIZE = 50,
	CCERR_INVALID_BUFFER_SIZE = 51,
	CCERR_INVALID_PBE = 52,
	CCERR_INVALID_FBO = 53,
	CCERR_INVALID_LENGTH = 54,
	CCERR_INVALID_ACCESS_RIGHTS = 55,
	CCERR_PBL_TOO_BIG = 56,
	CCERR_INVALID_VA = 57,
	CCERR_INVALID_REGION = 58,
	CCERR_INVALID_WINDOW = 59,
	CCERR_TOTAL_LENGTH_TOO_BIG = 60,
	CCERR_INVALID_QP_ID = 61,
	CCERR_ADDR_IN_USE = 62,
	CCERR_ADDR_NOT_AVAIL = 63,
	CCERR_NET_DOWN = 64,
	CCERR_NET_UNREACHABLE = 65,
	CCERR_CONN_ABORTED = 66,
	CCERR_CONN_RESET = 67,
	CCERR_NO_BUFS = 68,
	CCERR_CONN_TIMEDOUT = 69,
	CCERR_CONN_REFUSED = 70,
	CCERR_HOST_UNREACHABLE = 71,
	CCERR_INVALID_SEND_SGL_DEPTH = 72,
	CCERR_INVALID_RECV_SGL_DEPTH = 73,
	CCERR_INVALID_RDMA_WRITE_SGL_DEPTH = 74,
	CCERR_INSUFFICIENT_PRIVILEGES = 75,
	CCERR_STACK_ERROR = 76,
	CCERR_INVALID_VERSION = 77,
	CCERR_INVALID_MTU = 78,
	CCERR_INVALID_IMAGE = 79,
	CCERR_PENDING = 98,	/* not an error; used internally by adapter */
	CCERR_DEFER = 99,	/* not an error; used internally by adapter */
	CCERR_FAILED_WRITE = 100,
	CCERR_FAILED_ERASE = 101,
	CCERR_FAILED_VERIFICATION = 102,
	CCERR_NOT_FOUND = 103,
};
/*
* CCAE_ACTIVE_CONNECT_RESULTS status result codes.
*/
/* Connect-result codes map directly onto the c2_status values above. */
enum c2_connect_status {
	C2_CONN_STATUS_SUCCESS = C2_OK,
	C2_CONN_STATUS_NO_MEM = CCERR_INSUFFICIENT_RESOURCES,
	C2_CONN_STATUS_TIMEDOUT = CCERR_CONN_TIMEDOUT,
	C2_CONN_STATUS_REFUSED = CCERR_CONN_REFUSED,
	C2_CONN_STATUS_NETUNREACH = CCERR_NET_UNREACHABLE,
	C2_CONN_STATUS_HOSTUNREACH = CCERR_HOST_UNREACHABLE,
	C2_CONN_STATUS_INVALID_RNIC = CCERR_INVALID_RNIC,
	C2_CONN_STATUS_INVALID_QP = CCERR_INVALID_QP,
	C2_CONN_STATUS_INVALID_QP_STATE = CCERR_INVALID_QP_STATE,
	C2_CONN_STATUS_REJECTED = CCERR_CONN_RESET,
	C2_CONN_STATUS_ADDR_NOT_AVAIL = CCERR_ADDR_NOT_AVAIL,
};
/*
* Flash programming status codes.
*/
/* Status bits reported by the adapter's flash-programming engine. */
enum c2_flash_status {
	C2_FLASH_STATUS_SUCCESS = 0x0000,
	C2_FLASH_STATUS_VERIFY_ERR = 0x0002,
	C2_FLASH_STATUS_IMAGE_ERR = 0x0004,
	C2_FLASH_STATUS_ECLBS = 0x0400,
	C2_FLASH_STATUS_PSLBS = 0x0800,
	C2_FLASH_STATUS_VPENS = 0x1000,
};
#endif /* _C2_STATUS_H_ */
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef C2_USER_H
#define C2_USER_H
#include <linux/types.h>
/*
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
* In particular do not use pointer types -- pass pointers in __u64
* instead.
*/
/* Response returned to userspace when allocating a ucontext. */
struct c2_alloc_ucontext_resp {
	__u32 qp_tab_size;
	__u32 uarc_size;
};

/* Response returned to userspace when allocating a PD. */
struct c2_alloc_pd_resp {
	__u32 pdn;
	__u32 reserved;		/* pad for 64-bit alignment */
};

/* Userspace-supplied parameters for CQ creation (doorbell pages/indices). */
struct c2_create_cq {
	__u32 lkey;
	__u32 pdn;
	__u64 arm_db_page;
	__u64 set_db_page;
	__u32 arm_db_index;
	__u32 set_db_index;
};

/* Response returned to userspace after CQ creation. */
struct c2_create_cq_resp {
	__u32 cqn;
	__u32 reserved;		/* pad for 64-bit alignment */
};

/* Userspace-supplied parameters for QP creation (doorbell pages/indices). */
struct c2_create_qp {
	__u32 lkey;
	__u32 reserved;		/* pad for 64-bit alignment */
	__u64 sq_db_page;
	__u64 rq_db_page;
	__u32 sq_db_index;
	__u32 rq_db_index;
};
#endif /* C2_USER_H */
This diff is collapsed.
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _C2_VQ_H_
#define _C2_VQ_H_
#include <linux/sched.h>
#include "c2.h"
#include "c2_wr.h"
#include "c2_provider.h"
/*
 * Tracks one outstanding verbs-queue request while the caller waits
 * for the adapter's reply.  Reference-counted so the interrupt handler
 * and the waiting thread can each hold it safely.
 */
struct c2_vq_req {
	u64 reply_msg;		/* ptr to reply msg */
	wait_queue_head_t wait_object;	/* wait object for vq reqs */
	atomic_t reply_ready;	/* set when reply is ready */
	atomic_t refcnt;	/* used to cancel WRs... */
	int event;
	struct iw_cm_id *cm_id;
	struct c2_qp *qp;
};

int vq_init(struct c2_dev *c2dev);
void vq_term(struct c2_dev *c2dev);

/* Allocate/free a request; get/put manipulate the request refcount. */
struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev);
void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req);
void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req);
void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req);

/* Post a WR to the adapter's request queue. */
int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);

/* Reply-buffer management and the blocking wait for a reply. */
void *vq_repbuf_alloc(struct c2_dev *c2dev);
void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req);
#endif /* _C2_VQ_H_ */
This diff is collapsed.
config INFINIBAND_EHCA
tristate "eHCA support"
depends on IBMEBUS
---help---
This driver supports the deprecated IBM pSeries eHCA InfiniBand
adapter.
To compile the driver as a module, choose M here. The module
will be called ib_ehca.
# Authors: Heiko J Schick <schickhj@de.ibm.com>
# Christoph Raisch <raisch@de.ibm.com>
# Joachim Fenkes <fenkes@de.ibm.com>
#
# Copyright (c) 2005 IBM Corporation
#
# All rights reserved.
#
# This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
ib_ehca-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
9/2015
The ehca driver has been deprecated and moved to drivers/staging/rdma.
It will be removed in the 4.6 merge window.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Function definitions and structs for EQs, NEQs and interrupts
*
* Authors: Heiko J Schick <schickhj@de.ibm.com>
* Khadija Souissi <souissi@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __EHCA_IRQ_H
#define __EHCA_IRQ_H
struct ehca_shca;

#include <linux/interrupt.h>
#include <linux/types.h>

int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);

/* NEQ (notification event queue) interrupt and bottom half. */
irqreturn_t ehca_interrupt_neq(int irq, void *dev_id);
void ehca_tasklet_neq(unsigned long data);

/* EQ (event queue) interrupt and bottom half. */
irqreturn_t ehca_interrupt_eq(int irq, void *dev_id);
void ehca_tasklet_eq(unsigned long data);
void ehca_process_eq(struct ehca_shca *shca, int is_irq);

/* Per-CPU completion worker state. */
struct ehca_cpu_comp_task {
	struct list_head cq_list;	/* CQs queued for this CPU */
	spinlock_t task_lock;
	int cq_jobs;
	int active;
};

/* Pool of per-CPU completion threads with round-robin dispatch. */
struct ehca_comp_pool {
	struct ehca_cpu_comp_task __percpu *cpu_comp_tasks;
	struct task_struct * __percpu *cpu_comp_threads;
	int last_cpu;		/* last CPU a CQ was handed to */
	spinlock_t last_cpu_lock;
};

int ehca_create_comp_pool(void);
void ehca_destroy_comp_pool(void);
#endif
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
The ipath driver has been moved to staging in preparation for its removal in a
few releases. The driver will be deleted during the 4.6 merge window.
Contact Dennis Dalessandro <dennis.dalessandro@intel.com> and
Cc: linux-rdma@vger.kernel.org
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment