Commit 273b2578 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/mad: Test ib_create_send_mad() return with IS_ERR(), not == NULL
  IB/mlx4: Allow 4K messages for UD QPs
  mlx4_core: Add ethernet fields to CQE struct
  IB/ipath: Fix printk format warnings
  RDMA/cxgb3: Fix deadlock initializing iw_cxgb3 device
  RDMA/cxgb3: Fix up MW access rights
  RDMA/cxgb3: Fix QP capabilities
  RDMA/cma: Remove padding arrays by using struct sockaddr_storage
  IB/ipath: Use unsigned long for irq flags
  IPoIB/cm: Set correct SG list in ipoib_cm_init_rx_wr()
parents 8e43e12d 06a91a02
@@ -155,9 +155,7 @@ struct cma_multicast {
 	} multicast;
 	struct list_head list;
 	void *context;
-	struct sockaddr addr;
-	u8 pad[sizeof(struct sockaddr_in6) -
-	       sizeof(struct sockaddr)];
+	struct sockaddr_storage addr;
 };
 
 struct cma_work {
@@ -786,8 +784,8 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
 		cma_cancel_route(id_priv);
 		break;
 	case CMA_LISTEN:
-		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
-				!id_priv->cma_dev)
+		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
+				&& !id_priv->cma_dev)
 			cma_cancel_listens(id_priv);
 		break;
 	default:
@@ -1026,7 +1024,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
 
 	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
-	ret = rdma_translate_ip(&id->route.addr.src_addr,
+	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
 				&id->route.addr.dev_addr);
 	if (ret)
 		goto destroy_id;
@@ -1064,7 +1062,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
 			  ip_ver, port, src, dst);
 
-	ret = rdma_translate_ip(&id->route.addr.src_addr,
+	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
 				&id->route.addr.dev_addr);
 	if (ret)
 		goto err;
@@ -1377,7 +1375,7 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
 	if (IS_ERR(id_priv->cm_id.ib))
 		return PTR_ERR(id_priv->cm_id.ib);
 
-	addr = &id_priv->id.route.addr.src_addr;
+	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
 	svc_id = cma_get_service_id(id_priv->id.ps, addr);
 	if (cma_any_addr(addr))
 		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
@@ -1443,7 +1441,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 	dev_id_priv->state = CMA_ADDR_BOUND;
 	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
-	       ip_addr_size(&id_priv->id.route.addr.src_addr));
+	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 
 	cma_attach_to_dev(dev_id_priv, cma_dev);
 	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
@@ -1563,13 +1561,14 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
 	path_rec.numb_path = 1;
 	path_rec.reversible = 1;
-	path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);
+	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
+						 (struct sockaddr *) &addr->dst_addr);
 
 	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
 		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
 		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
 
-	if (addr->src_addr.sa_family == AF_INET) {
+	if (addr->src_addr.ss_family == AF_INET) {
 		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
 		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
 	} else {
@@ -1848,7 +1847,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
 	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
 
-	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
+	if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
 		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
 		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
 		src_in->sin_family = dst_in->sin_family;
@@ -1897,7 +1896,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	if (cma_any_addr(dst_addr))
 		ret = cma_resolve_loopback(id_priv);
 	else
-		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
+		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
 				      dst_addr, &id->route.addr.dev_addr,
 				      timeout_ms, addr_handler, id_priv);
 	if (ret)
@@ -2021,11 +2020,11 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
 	 * We don't support binding to any address if anyone is bound to
 	 * a specific address on the same port.
 	 */
-	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
+	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
 		return -EADDRNOTAVAIL;
 
 	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
-		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
+		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
 			return -EADDRNOTAVAIL;
 
 		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
@@ -2060,7 +2059,7 @@ static int cma_get_port(struct rdma_id_private *id_priv)
 	}
 
 	mutex_lock(&lock);
-	if (cma_any_port(&id_priv->id.route.addr.src_addr))
+	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
 		ret = cma_alloc_any_port(ps, id_priv);
 	else
 		ret = cma_use_port(ps, id_priv);
@@ -2232,7 +2231,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 	req.path = route->path_rec;
 	req.service_id = cma_get_service_id(id_priv->id.ps,
-					    &route->addr.dst_addr);
+					    (struct sockaddr *) &route->addr.dst_addr);
 	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
@@ -2283,7 +2282,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	req.alternate_path = &route->path_rec[1];
 
 	req.service_id = cma_get_service_id(id_priv->id.ps,
-					    &route->addr.dst_addr);
+					    (struct sockaddr *) &route->addr.dst_addr);
 	req.qp_num = id_priv->qp_num;
 	req.qp_type = IB_QPT_RC;
 	req.starting_psn = id_priv->seq_num;
@@ -2667,7 +2666,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
 	if (ret)
 		return ret;
 
-	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
+	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
 	if (id_priv->id.ps == RDMA_PS_UDP)
 		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
 	ib_addr_get_sgid(dev_addr, &rec.port_gid);
...
@@ -133,7 +133,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
 	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
 				 recv_wc->wc->pkey_index, 1, hdr_len,
 				 0, GFP_KERNEL);
-	if (!msg)
+	if (IS_ERR(msg))
 		return;
 
 	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
...
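Note: the hunk above matters because ib_create_send_mad() reports failure with an ERR_PTR()-encoded pointer, never NULL, so a NULL test can never fire. A minimal sketch of the required caller pattern follows; the agent, recv_wc and hdr_len parameters are placeholders for whatever the real caller has in scope, and the sketch assumes <rdma/ib_mad.h> and <linux/err.h> are included.

static int send_ack_sketch(struct ib_mad_agent *agent,
			   struct ib_mad_recv_wc *recv_wc, int hdr_len)
{
	struct ib_mad_send_buf *msg;

	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL);
	if (IS_ERR(msg))		/* a "!msg" check would never trigger */
		return PTR_ERR(msg);	/* typically -ENOMEM */

	/* ... format and post msg ... */
	return 0;
}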
@@ -81,9 +81,7 @@ struct ucma_multicast {
 	u64			uid;
 	struct list_head	list;
-	struct sockaddr		addr;
-	u8			pad[sizeof(struct sockaddr_in6) -
-				    sizeof(struct sockaddr)];
+	struct sockaddr_storage	addr;
 };
 
 struct ucma_event {
@@ -603,11 +601,11 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 		return PTR_ERR(ctx);
 
 	memset(&resp, 0, sizeof resp);
-	addr = &ctx->cm_id->route.addr.src_addr;
+	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
 	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
 				     sizeof(struct sockaddr_in) :
 				     sizeof(struct sockaddr_in6));
-	addr = &ctx->cm_id->route.addr.dst_addr;
+	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
 	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
 				     sizeof(struct sockaddr_in) :
 				     sizeof(struct sockaddr_in6));
@@ -913,7 +911,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 	mc->uid = cmd.uid;
 	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
-	ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
+	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
 	if (ret)
 		goto err2;
@@ -929,7 +927,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 	return 0;
 
 err3:
-	rdma_leave_multicast(ctx->cm_id, &mc->addr);
+	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
 	ucma_cleanup_mc_events(mc);
 err2:
 	mutex_lock(&mut);
@@ -975,7 +973,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
 		goto out;
 	}
 
-	rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
+	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
 	mutex_lock(&mc->ctx->file->mut);
 	ucma_cleanup_mc_events(mc);
 	list_del(&mc->list);
...
@@ -725,9 +725,9 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
 			       V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
 		BUG_ON(page_size >= 28);
 		tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
-				F_TPT_MW_BIND_ENABLE |
+				((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
 				V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
 				V_TPT_PAGE_SIZE(page_size));
 		tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
 				cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
 		tpt.len = cpu_to_be32(len);
...
@@ -1187,28 +1187,6 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
 }
 
-static int fw_supports_fastreg(struct iwch_dev *iwch_dev)
-{
-	struct ethtool_drvinfo info;
-	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-	char *cp, *next;
-	unsigned fw_maj, fw_min;
-
-	rtnl_lock();
-	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-	rtnl_unlock();
-
-	next = info.fw_version+1;
-	cp = strsep(&next, ".");
-	sscanf(cp, "%i", &fw_maj);
-	cp = strsep(&next, ".");
-	sscanf(cp, "%i", &fw_min);
-
-	PDBG("%s maj %u min %u\n", __func__, fw_maj, fw_min);
-
-	return fw_maj > 6 || (fw_maj == 6 && fw_min > 0);
-}
-
 static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
@@ -1325,12 +1303,12 @@ int iwch_register_device(struct iwch_dev *dev)
 	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
 	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
 	dev->ibdev.owner = THIS_MODULE;
-	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
+	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
+				IB_DEVICE_MEM_WINDOW |
+				IB_DEVICE_MEM_MGT_EXTENSIONS;
 
 	/* cxgb3 supports STag 0. */
 	dev->ibdev.local_dma_lkey = 0;
-	if (fw_supports_fastreg(dev))
-		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
 
 	dev->ibdev.uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
...
@@ -293,9 +293,16 @@ static inline u32 iwch_ib_to_tpt_access(int acc)
 	return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
 	       (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
 	       (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
+	       (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
 	       TPT_LOCAL_READ;
 }
 
+static inline u32 iwch_ib_to_tpt_bind_access(int acc)
+{
+	return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
+	       (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
+}
+
 enum iwch_mmid_state {
 	IWCH_STAG_STATE_VALID,
 	IWCH_STAG_STATE_INVALID
...
@@ -565,7 +565,7 @@ int iwch_bind_mw(struct ib_qp *qp,
 	wqe->bind.type = TPT_VATO;
 
 	/* TBD: check perms */
-	wqe->bind.perms = iwch_ib_to_tpt_access(mw_bind->mw_access_flags);
+	wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
 	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
 	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
 	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
@@ -879,20 +879,13 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
 		(qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
 		(qhp->attr.mpa_attr.crc_enabled << 2);
 
-	/*
-	 * XXX - The IWCM doesn't quite handle getting these
-	 * attrs set before going into RTS.  For now, just turn
-	 * them on always...
-	 */
-#if 0
-	init_attr.qpcaps = qhp->attr.enableRdmaRead |
-		(qhp->attr.enableRdmaWrite << 1) |
-		(qhp->attr.enableBind << 2) |
-		(qhp->attr.enable_stag0_fastreg << 3) |
-		(qhp->attr.enable_stag0_fastreg << 4);
-#else
-	init_attr.qpcaps = 0x1f;
-#endif
+	init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
+			   uP_RI_QP_RDMA_WRITE_ENABLE |
+			   uP_RI_QP_BIND_ENABLE;
+	if (!qhp->ibqp.uobject)
+		init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
+				    uP_RI_QP_FAST_REGISTER_ENABLE;
 	init_attr.tcp_emss = qhp->ep->emss;
 	init_attr.ord = qhp->attr.max_ord;
 	init_attr.ird = qhp->attr.max_ird;
@@ -900,8 +893,6 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
 	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
 	init_attr.rqe_count = iwch_rqes_posted(qhp);
 	init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
-	if (!qhp->ibqp.uobject)
-		init_attr.flags |= PRIV_QP;
 	if (peer2peer) {
 		init_attr.rtr_type = RTR_READ;
 		if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
...
@@ -1259,7 +1259,7 @@ void ipath_kreceive(struct ipath_portdata *pd)
 			 */
 			ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
 				   " %x, len %x hdrq+%x rhf: %Lx\n",
-				   etail, tlen, l,
+				   etail, tlen, l, (unsigned long long)
 				   le64_to_cpu(*(__le64 *) rhf_addr));
 			if (ipath_debug & __IPATH_ERRPKTDBG) {
 				u32 j, *d, dw = rsize-2;
@@ -1457,7 +1457,8 @@ static void ipath_reset_availshadow(struct ipath_devdata *dd)
 			0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
 		if (oldval != dd->ipath_pioavailshadow[i])
 			ipath_dbg("shadow[%d] was %Lx, now %lx\n",
-				i, oldval, dd->ipath_pioavailshadow[i]);
+				i, (unsigned long long) oldval,
+				dd->ipath_pioavailshadow[i]);
 	}
 	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
 }
...
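Note: the casts added in these ipath hunks (and the ones that follow) are what "IB/ipath: Fix printk format warnings" refers to: u64 is unsigned long on some 64-bit architectures and unsigned long long on others, so %llx/%Lx/%Lu conversions need an explicit cast to build warning-free everywhere. A minimal sketch of the convention, with a made-up value name:

static void report_counter_sketch(u64 val)
{
	/* The cast keeps %llx correct whether u64 is long or long long. */
	printk(KERN_DEBUG "counter is 0x%llx\n", (unsigned long long) val);
}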
@@ -1032,7 +1032,7 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
 	ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
 		   (unsigned long long)
 		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
-		   prev_val);
+		   (unsigned long long) prev_val);
 
 	guid = be64_to_cpu(dd->ipath_guid);
@@ -1042,7 +1042,8 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
 		ipath_dbg("No GUID for heartbeat, faking %llx\n",
 			  (unsigned long long)guid);
 	} else
-		ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n", guid);
+		ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n",
+			   (unsigned long long) guid);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
 	return ret;
 }
@@ -2505,7 +2506,7 @@ static void autoneg_work(struct work_struct *work)
 	if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
 		ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
 			  ipath_ib_state(dd, dd->ipath_lastibcstat),
-			  jiffies_to_msecs(jiffies)-startms);
+			  (unsigned long long) jiffies_to_msecs(jiffies)-startms);
 		dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
 		if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
 			dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
...
@@ -356,9 +356,10 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
 			dd->ipath_cregs->cr_iblinkerrrecovcnt);
 		if (linkrecov != dd->ipath_lastlinkrecov) {
 			ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
-				ibcs, ib_linkstate(dd, ibcs),
+				(unsigned long long) ibcs,
+				ib_linkstate(dd, ibcs),
 				ipath_ibcstatus_str[ltstate],
-				linkrecov);
+				(unsigned long long) linkrecov);
 			/* and no more until active again */
 			dd->ipath_lastlinkrecov = 0;
 			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
@@ -1118,9 +1119,11 @@ irqreturn_t ipath_intr(int irq, void *data)
 	if (unlikely(istat & ~dd->ipath_i_bitsextant))
 		ipath_dev_err(dd,
 			      "interrupt with unknown interrupts %Lx set\n",
+			      (unsigned long long)
 			      istat & ~dd->ipath_i_bitsextant);
 	else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
-		ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n", istat);
+		ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n",
+			   (unsigned long long) istat);
 
 	if (istat & INFINIPATH_I_ERROR) {
 		ipath_stats.sps_errints++;
@@ -1128,7 +1131,8 @@ irqreturn_t ipath_intr(int irq, void *data)
 				  dd->ipath_kregs->kr_errorstatus);
 		if (!estat)
 			dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
-				 "but no error bits set!\n", istat);
+				 "but no error bits set!\n",
+				 (unsigned long long) istat);
 		else if (estat == -1LL)
 			/*
 			 * should we try clearing all, or hope next read
...
@@ -1021,7 +1021,7 @@ static void sdma_complete(void *cookie, int status)
 	struct ipath_verbs_txreq *tx = cookie;
 	struct ipath_qp *qp = tx->qp;
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	unsigned int flags;
+	unsigned long flags;
 	enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
 		IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
 
@@ -1051,7 +1051,7 @@ static void sdma_complete(void *cookie, int status)
 static void decrement_dma_busy(struct ipath_qp *qp)
 {
-	unsigned int flags;
+	unsigned long flags;
 
 	if (atomic_dec_and_test(&qp->s_dma_busy)) {
 		spin_lock_irqsave(&qp->s_lock, flags);
@@ -1221,7 +1221,7 @@ static int ipath_verbs_send_pio(struct ipath_qp *qp,
 	unsigned flush_wc;
 	u32 control;
 	int ret;
-	unsigned int flags;
+	unsigned long flags;
 
 	piobuf = ipath_getpiobuf(dd, plen, NULL);
 	if (unlikely(piobuf == NULL)) {
...
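Note: the three hunks above ("IB/ipath: Use unsigned long for irq flags") correct the declared type of the saved-IRQ-state variable: spin_lock_irqsave() stores the interrupt state into its flags argument, which the locking API defines as unsigned long, so an unsigned int draws a warning and can truncate the value on 64-bit builds. A minimal sketch, with placeholder lock and work:

static void locked_work_sketch(spinlock_t *lock)
{
	unsigned long flags;	/* must be unsigned long, not unsigned int */

	spin_lock_irqsave(lock, flags);
	/* ... critical section runs with local interrupts disabled ... */
	spin_unlock_irqrestore(lock, flags);
}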
@@ -515,17 +515,17 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
 	wc->vendor_err = cqe->vendor_err_syndrome;
 }
 
-static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
+static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
 {
-	return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
-				      MLX4_CQE_IPOIB_STATUS_IPV4F |
-				      MLX4_CQE_IPOIB_STATUS_IPV4OPT |
-				      MLX4_CQE_IPOIB_STATUS_IPV6 |
-				      MLX4_CQE_IPOIB_STATUS_IPOK)) ==
-		cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
-			    MLX4_CQE_IPOIB_STATUS_IPOK)) &&
-		(status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP |
-				      MLX4_CQE_IPOIB_STATUS_TCP)) &&
+	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+				      MLX4_CQE_STATUS_IPV4F |
+				      MLX4_CQE_STATUS_IPV4OPT |
+				      MLX4_CQE_STATUS_IPV6 |
+				      MLX4_CQE_STATUS_IPOK)) ==
+		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+			    MLX4_CQE_STATUS_IPOK)) &&
+		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
+				      MLX4_CQE_STATUS_TCP)) &&
 		checksum == cpu_to_be16(0xffff);
 }
 
@@ -582,17 +582,17 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	}
 
 	if (!*cur_qp ||
-	    (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
+	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
 		/*
 		 * We do not have to take the QP table lock here,
 		 * because CQs will be locked while QPs are removed
 		 * from the table.
 		 */
 		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
-				       be32_to_cpu(cqe->my_qpn));
+				       be32_to_cpu(cqe->vlan_my_qpn));
 		if (unlikely(!mqp)) {
 			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
-			       cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff);
+			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
 			return -EINVAL;
 		}
@@ -692,14 +692,13 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		}
 
 		wc->slid = be16_to_cpu(cqe->rlid);
-		wc->sl = cqe->sl >> 4;
+		wc->sl = be16_to_cpu(cqe->sl_vid >> 12);
 		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
 		wc->src_qp = g_mlpath_rqpn & 0xffffff;
 		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
 		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
 		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
-		wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
-						    cqe->checksum);
+		wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
 	}
 
 	return 0;
@@ -767,7 +766,7 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
 	 */
 	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
-		if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) {
+		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
 			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
 				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
 			++nfreed;
...
@@ -902,7 +902,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			context->mtu_msgmax = (IB_MTU_4096 << 5) |
 					      ilog2(dev->dev->caps.max_gso_sz);
 		else
-			context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
 	} else if (attr_mask & IB_QP_PATH_MTU) {
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
 			printk(KERN_ERR "path MTU (%u) is invalid\n",
...
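Note: this is the one-line change behind "IB/mlx4: Allow 4K messages for UD QPs". Assuming the usual mlx4 QP-context layout, mtu_msgmax carries the path MTU in its upper bits and log2 of the maximum message size in its low five bits, so raising the constant from 11 to 12 lifts the UD message limit from 2048 to 4096 bytes. A small sketch of the encoding:

	u8 mtu_msgmax = (IB_MTU_4096 << 5) | 12;	/* IB_MTU_4096 == 5, so this is 0xac */
	unsigned int max_msg = 1u << (mtu_msgmax & 0x1f);	/* == 4096 bytes */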
@@ -337,7 +337,7 @@ static void ipoib_cm_init_rx_wr(struct net_device *dev,
 		sge[i].length = PAGE_SIZE;
 
 	wr->next = NULL;
-	wr->sg_list = priv->cm.rx_sge;
+	wr->sg_list = sge;
 	wr->num_sge = priv->cm.num_frags;
 }
...
@@ -39,17 +39,18 @@
 #include <linux/mlx4/doorbell.h>
 
 struct mlx4_cqe {
-	__be32 my_qpn;
+	__be32 vlan_my_qpn;
 	__be32 immed_rss_invalid;
 	__be32 g_mlpath_rqpn;
-	u8 sl;
-	u8 reserved1;
+	__be16 sl_vid;
 	__be16 rlid;
-	__be32 ipoib_status;
+	__be16 status;
+	u8 ipv6_ext_mask;
+	u8 badfcs_enc;
 	__be32 byte_cnt;
 	__be16 wqe_index;
 	__be16 checksum;
-	u8 reserved2[3];
+	u8 reserved[3];
 	u8 owner_sr_opcode;
 };
 
@@ -63,6 +64,11 @@ struct mlx4_err_cqe {
 	u8 owner_sr_opcode;
 };
 
+enum {
+	MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
+	MLX4_CQE_QPN_MASK = 0xffffff,
+};
+
 enum {
 	MLX4_CQE_OWNER_MASK = 0x80,
 	MLX4_CQE_IS_SEND_MASK = 0x40,
@@ -86,13 +92,19 @@
 };
 
 enum {
-	MLX4_CQE_IPOIB_STATUS_IPV4 = 1 << 22,
-	MLX4_CQE_IPOIB_STATUS_IPV4F = 1 << 23,
-	MLX4_CQE_IPOIB_STATUS_IPV6 = 1 << 24,
-	MLX4_CQE_IPOIB_STATUS_IPV4OPT = 1 << 25,
-	MLX4_CQE_IPOIB_STATUS_TCP = 1 << 26,
-	MLX4_CQE_IPOIB_STATUS_UDP = 1 << 27,
-	MLX4_CQE_IPOIB_STATUS_IPOK = 1 << 28,
+	MLX4_CQE_STATUS_IPV4 = 1 << 6,
+	MLX4_CQE_STATUS_IPV4F = 1 << 7,
+	MLX4_CQE_STATUS_IPV6 = 1 << 8,
+	MLX4_CQE_STATUS_IPV4OPT = 1 << 9,
+	MLX4_CQE_STATUS_TCP = 1 << 10,
+	MLX4_CQE_STATUS_UDP = 1 << 11,
+	MLX4_CQE_STATUS_IPOK = 1 << 12,
+};
+
+enum {
+	MLX4_CQE_LLC = 1,
+	MLX4_CQE_SNAP = 1 << 1,
+	MLX4_CQE_BAD_FCS = 1 << 4,
 };
 
 static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
...
@@ -71,12 +71,8 @@ enum rdma_port_space {
 };
 
 struct rdma_addr {
-	struct sockaddr src_addr;
-	u8 src_pad[sizeof(struct sockaddr_in6) -
-		   sizeof(struct sockaddr)];
-	struct sockaddr dst_addr;
-	u8 dst_pad[sizeof(struct sockaddr_in6) -
-		   sizeof(struct sockaddr)];
+	struct sockaddr_storage src_addr;
+	struct sockaddr_storage dst_addr;
 	struct rdma_dev_addr dev_addr;
 };
...
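Note: the padding arrays removed here (and in cma.c and ucma.c above) only existed to reserve room for an IPv6 address behind a bare struct sockaddr; struct sockaddr_storage is already large enough and suitably aligned for any address family, which is why the rest of the series adds (struct sockaddr *) casts at the call sites instead. A minimal sketch of the pattern, using made-up names and assuming <linux/socket.h>, <linux/in.h> and <linux/in6.h>:

struct example_addr {
	struct sockaddr_storage src_addr;	/* holds sockaddr_in or sockaddr_in6, no pad[] needed */
};

static size_t example_addr_size(struct example_addr *a)
{
	struct sockaddr *sa = (struct sockaddr *) &a->src_addr;

	return sa->sa_family == AF_INET ? sizeof(struct sockaddr_in) :
					  sizeof(struct sockaddr_in6);
}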