Commit 99f1c97d authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (81 commits)
  RDMA/cxgb3: Fix the T3A workaround checks
  IB/ipath: Remove unnecessary cast
  IPoIB: Constify seq_operations function pointer tables
  RDMA/cxgb3: Mark QP as privileged based on user capabilities
  RDMA/cxgb3: Fix page shift calculation in build_phys_page_list()
  RDMA/cxgb3: Flush the receive queue when closing
  IB/ipath: Trivial simplification of ipath_make_ud_req()
  IB/mthca: Update latest "native Arbel" firmware revision
  IPoIB: Remove redundant check of netif_queue_stopped() in xmit handler
  IB/ipath: Add mappings from HW register to PortInfo port physical state
  IB/ipath: Changes to support PIO bandwidth check on IBA7220
  IB/ipath: Minor cleanup of unused fields and chip-specific errors
  IB/ipath: New sysfs entries to control 7220 features
  IB/ipath: Add new chip-specific functions to older chips, consistent init
  IB/ipath: Remove unused MDIO interface code
  IB/ehca: Prevent RDMA-related connection failures on some eHCA2 hardware
  IB/ehca: Add "port connection autodetect mode"
  IB/ehca: Define array to store SMI/GSI QPs
  IB/ehca: Remove CQ-QP-link before destroying QP in error path of create_qp()
  IB/iser: Add change_queue_depth method
  ...
parents b31fde6d 8176d297
@@ -295,16 +295,6 @@ Who: linuxppc-dev@ozlabs.org
--------------------------- ---------------------------
What: mthca driver's MSI support
When: January 2008
Files: drivers/infiniband/hw/mthca/*.[ch]
Why: All mthca hardware also supports MSI-X, which provides
strictly more functionality than MSI. So there is no point in
having both MSI-X and MSI support in the driver.
Who: Roland Dreier <rolandd@cisco.com>
---------------------------
What: sk98lin network driver What: sk98lin network driver
When: Feburary 2008 When: Feburary 2008
Why: In kernel tree version of driver is unmaintained. Sk98lin driver Why: In kernel tree version of driver is unmaintained. Sk98lin driver
......
@@ -488,7 +488,8 @@ void rdma_destroy_qp(struct rdma_cm_id *id)
} }
EXPORT_SYMBOL(rdma_destroy_qp); EXPORT_SYMBOL(rdma_destroy_qp);
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv) static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{ {
struct ib_qp_attr qp_attr; struct ib_qp_attr qp_attr;
int qp_attr_mask, ret; int qp_attr_mask, ret;
@@ -514,13 +515,16 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv)
if (ret) if (ret)
goto out; goto out;
if (conn_param)
qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out: out:
mutex_unlock(&id_priv->qp_mutex); mutex_unlock(&id_priv->qp_mutex);
return ret; return ret;
} }
static int cma_modify_qp_rts(struct rdma_id_private *id_priv) static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{ {
struct ib_qp_attr qp_attr; struct ib_qp_attr qp_attr;
int qp_attr_mask, ret; int qp_attr_mask, ret;
@@ -536,6 +540,8 @@ static int cma_modify_qp_rts(struct rdma_id_private *id_priv)
if (ret) if (ret)
goto out; goto out;
if (conn_param)
qp_attr.max_rd_atomic = conn_param->initiator_depth;
ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out: out:
mutex_unlock(&id_priv->qp_mutex); mutex_unlock(&id_priv->qp_mutex);
@@ -866,11 +872,11 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
{ {
int ret; int ret;
ret = cma_modify_qp_rtr(id_priv); ret = cma_modify_qp_rtr(id_priv, NULL);
if (ret) if (ret)
goto reject; goto reject;
ret = cma_modify_qp_rts(id_priv); ret = cma_modify_qp_rts(id_priv, NULL);
if (ret) if (ret)
goto reject; goto reject;
@@ -1122,8 +1128,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
cm_id->cm_handler = cma_ib_handler; cm_id->cm_handler = cma_ib_handler;
ret = conn_id->id.event_handler(&conn_id->id, &event); ret = conn_id->id.event_handler(&conn_id->id, &event);
if (!ret) if (!ret) {
cma_enable_remove(conn_id);
goto out; goto out;
}
/* Destroy the CM ID by returning a non-zero value. */ /* Destroy the CM ID by returning a non-zero value. */
conn_id->cm_id.ib = NULL; conn_id->cm_id.ib = NULL;
@@ -1262,6 +1270,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
struct net_device *dev = NULL; struct net_device *dev = NULL;
struct rdma_cm_event event; struct rdma_cm_event event;
int ret; int ret;
struct ib_device_attr attr;
listen_id = cm_id->context; listen_id = cm_id->context;
if (cma_disable_remove(listen_id, CMA_LISTEN)) if (cma_disable_remove(listen_id, CMA_LISTEN))
@@ -1311,10 +1320,19 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr; sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
*sin = iw_event->remote_addr; *sin = iw_event->remote_addr;
ret = ib_query_device(conn_id->id.device, &attr);
if (ret) {
cma_enable_remove(conn_id);
rdma_destroy_id(new_cm_id);
goto out;
}
memset(&event, 0, sizeof event); memset(&event, 0, sizeof event);
event.event = RDMA_CM_EVENT_CONNECT_REQUEST; event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
event.param.conn.private_data = iw_event->private_data; event.param.conn.private_data = iw_event->private_data;
event.param.conn.private_data_len = iw_event->private_data_len; event.param.conn.private_data_len = iw_event->private_data_len;
event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
event.param.conn.responder_resources = attr.max_qp_rd_atom;
ret = conn_id->id.event_handler(&conn_id->id, &event); ret = conn_id->id.event_handler(&conn_id->id, &event);
if (ret) { if (ret) {
/* User wants to destroy the CM ID */ /* User wants to destroy the CM ID */
@@ -2272,7 +2290,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr; sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
cm_id->remote_addr = *sin; cm_id->remote_addr = *sin;
ret = cma_modify_qp_rtr(id_priv); ret = cma_modify_qp_rtr(id_priv, conn_param);
if (ret) if (ret)
goto out; goto out;
@@ -2335,25 +2353,15 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param) struct rdma_conn_param *conn_param)
{ {
struct ib_cm_rep_param rep; struct ib_cm_rep_param rep;
struct ib_qp_attr qp_attr; int ret;
int qp_attr_mask, ret;
if (id_priv->id.qp) {
ret = cma_modify_qp_rtr(id_priv);
if (ret)
goto out;
qp_attr.qp_state = IB_QPS_RTS; ret = cma_modify_qp_rtr(id_priv, conn_param);
ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr, if (ret)
&qp_attr_mask); goto out;
if (ret)
goto out;
qp_attr.max_rd_atomic = conn_param->initiator_depth; ret = cma_modify_qp_rts(id_priv, conn_param);
ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); if (ret)
if (ret) goto out;
goto out;
}
memset(&rep, 0, sizeof rep); memset(&rep, 0, sizeof rep);
rep.qp_num = id_priv->qp_num; rep.qp_num = id_priv->qp_num;
@@ -2378,7 +2386,7 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
struct iw_cm_conn_param iw_param; struct iw_cm_conn_param iw_param;
int ret; int ret;
ret = cma_modify_qp_rtr(id_priv); ret = cma_modify_qp_rtr(id_priv, conn_param);
if (ret) if (ret)
return ret; return ret;
@@ -2598,11 +2606,9 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
/* IPv6 address is an SA assigned MGID. */ /* IPv6 address is an SA assigned MGID. */
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
} else { } else {
ip_ib_mc_map(sin->sin_addr.s_addr, mc_map); ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
if (id_priv->id.ps == RDMA_PS_UDP) if (id_priv->id.ps == RDMA_PS_UDP)
mc_map[7] = 0x01; /* Use RDMA CM signature */ mc_map[7] = 0x01; /* Use RDMA CM signature */
mc_map[8] = ib_addr_get_pkey(dev_addr) >> 8;
mc_map[9] = (unsigned char) ib_addr_get_pkey(dev_addr);
*mgid = *(union ib_gid *) (mc_map + 4); *mgid = *(union ib_gid *) (mc_map + 4);
} }
} }
......
@@ -139,7 +139,7 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
static void ib_fmr_batch_release(struct ib_fmr_pool *pool) static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{ {
int ret; int ret;
struct ib_pool_fmr *fmr; struct ib_pool_fmr *fmr, *next;
LIST_HEAD(unmap_list); LIST_HEAD(unmap_list);
LIST_HEAD(fmr_list); LIST_HEAD(fmr_list);
@@ -158,6 +158,20 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
#endif #endif
} }
/*
* The free_list may hold FMRs that have been put there
* because they haven't reached the max_remap count.
* Invalidate their mapping as well.
*/
list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
if (fmr->remap_count == 0)
continue;
hlist_del_init(&fmr->cache_node);
fmr->remap_count = 0;
list_add_tail(&fmr->fmr->list, &fmr_list);
list_move(&fmr->list, &unmap_list);
}
list_splice(&pool->dirty_list, &unmap_list); list_splice(&pool->dirty_list, &unmap_list);
INIT_LIST_HEAD(&pool->dirty_list); INIT_LIST_HEAD(&pool->dirty_list);
pool->dirty_len = 0; pool->dirty_len = 0;
@@ -182,8 +196,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
struct ib_fmr_pool *pool = pool_ptr; struct ib_fmr_pool *pool = pool_ptr;
do { do {
if (pool->dirty_len >= pool->dirty_watermark || if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
ib_fmr_batch_release(pool); ib_fmr_batch_release(pool);
atomic_inc(&pool->flush_ser); atomic_inc(&pool->flush_ser);
@@ -194,8 +207,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
} }
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
if (pool->dirty_len < pool->dirty_watermark && if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
!kthread_should_stop()) !kthread_should_stop())
schedule(); schedule();
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
@@ -369,11 +381,6 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
i = 0; i = 0;
list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) { list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
if (fmr->remap_count) {
INIT_LIST_HEAD(&fmr_list);
list_add_tail(&fmr->fmr->list, &fmr_list);
ib_unmap_fmr(&fmr_list);
}
ib_dealloc_fmr(fmr->fmr); ib_dealloc_fmr(fmr->fmr);
list_del(&fmr->list); list_del(&fmr->list);
kfree(fmr); kfree(fmr);
@@ -511,8 +518,10 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
list_add_tail(&fmr->list, &pool->free_list); list_add_tail(&fmr->list, &pool->free_list);
} else { } else {
list_add_tail(&fmr->list, &pool->dirty_list); list_add_tail(&fmr->list, &pool->dirty_list);
++pool->dirty_len; if (++pool->dirty_len >= pool->dirty_watermark) {
wake_up_process(pool->thread); atomic_inc(&pool->req_ser);
wake_up_process(pool->thread);
}
} }
} }
......
@@ -701,7 +701,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
} }
/* Check to post send on QP or process locally */ /* Check to post send on QP or process locally */
if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD) if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
goto out; goto out;
local = kmalloc(sizeof *local, GFP_ATOMIC); local = kmalloc(sizeof *local, GFP_ATOMIC);
@@ -752,8 +753,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
port_priv = ib_get_mad_port(mad_agent_priv->agent.device, port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
mad_agent_priv->agent.port_num); mad_agent_priv->agent.port_num);
if (port_priv) { if (port_priv) {
mad_priv->mad.mad.mad_hdr.tid = memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
((struct ib_mad *)smp)->mad_hdr.tid;
recv_mad_agent = find_mad_agent(port_priv, recv_mad_agent = find_mad_agent(port_priv,
&mad_priv->mad.mad); &mad_priv->mad.mad);
} }
@@ -1100,7 +1100,9 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
/* Timeout will be updated after send completes */ /* Timeout will be updated after send completes */
mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
mad_send_wr->retries = send_buf->retries; mad_send_wr->max_retries = send_buf->retries;
mad_send_wr->retries_left = send_buf->retries;
send_buf->retries = 0;
/* Reference for work request to QP + response */ /* Reference for work request to QP + response */
mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
mad_send_wr->status = IB_WC_SUCCESS; mad_send_wr->status = IB_WC_SUCCESS;
@@ -1931,15 +1933,6 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
if (port_priv->device->process_mad) { if (port_priv->device->process_mad) {
int ret; int ret;
if (!response) {
printk(KERN_ERR PFX "No memory for response MAD\n");
/*
* Is it better to assume that
* it wouldn't be processed ?
*/
goto out;
}
ret = port_priv->device->process_mad(port_priv->device, 0, ret = port_priv->device->process_mad(port_priv->device, 0,
port_priv->port_num, port_priv->port_num,
wc, &recv->grh, wc, &recv->grh,
@@ -2282,8 +2275,6 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
/* Empty wait list to prevent receives from finding a request */ /* Empty wait list to prevent receives from finding a request */
list_splice_init(&mad_agent_priv->wait_list, &cancel_list); list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
/* Empty local completion list as well */
list_splice_init(&mad_agent_priv->local_list, &cancel_list);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
/* Report all cancelled requests */ /* Report all cancelled requests */
@@ -2445,9 +2436,12 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{ {
int ret; int ret;
if (!mad_send_wr->retries--) if (!mad_send_wr->retries_left)
return -ETIMEDOUT; return -ETIMEDOUT;
mad_send_wr->retries_left--;
mad_send_wr->send_buf.retries++;
mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
if (mad_send_wr->mad_agent_priv->agent.rmpp_version) { if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
......
@@ -131,7 +131,8 @@ struct ib_mad_send_wr_private {
struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
__be64 tid; __be64 tid;
unsigned long timeout; unsigned long timeout;
int retries; int max_retries;
int retries_left;
int retry; int retry;
int refcount; int refcount;
enum ib_wc_status status; enum ib_wc_status status;
......
@@ -684,7 +684,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
if (seg_num > mad_send_wr->last_ack) { if (seg_num > mad_send_wr->last_ack) {
adjust_last_ack(mad_send_wr, seg_num); adjust_last_ack(mad_send_wr, seg_num);
mad_send_wr->retries = mad_send_wr->send_buf.retries; mad_send_wr->retries_left = mad_send_wr->max_retries;
} }
mad_send_wr->newwin = newwin; mad_send_wr->newwin = newwin;
if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) { if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
......
@@ -73,11 +73,20 @@ struct mcast_device {
}; };
enum mcast_state { enum mcast_state {
MCAST_IDLE,
MCAST_JOINING, MCAST_JOINING,
MCAST_MEMBER, MCAST_MEMBER,
MCAST_ERROR,
};
enum mcast_group_state {
MCAST_IDLE,
MCAST_BUSY, MCAST_BUSY,
MCAST_ERROR MCAST_GROUP_ERROR,
MCAST_PKEY_EVENT
};
enum {
MCAST_INVALID_PKEY_INDEX = 0xFFFF
}; };
struct mcast_member; struct mcast_member;
@@ -93,9 +102,10 @@ struct mcast_group {
struct mcast_member *last_join; struct mcast_member *last_join;
int members[3]; int members[3];
atomic_t refcount; atomic_t refcount;
enum mcast_state state; enum mcast_group_state state;
struct ib_sa_query *query; struct ib_sa_query *query;
int query_id; int query_id;
u16 pkey_index;
}; };
struct mcast_member { struct mcast_member {
@@ -378,9 +388,19 @@ static int fail_join(struct mcast_group *group, struct mcast_member *member,
static void process_group_error(struct mcast_group *group) static void process_group_error(struct mcast_group *group)
{ {
struct mcast_member *member; struct mcast_member *member;
int ret; int ret = 0;
u16 pkey_index;
if (group->state == MCAST_PKEY_EVENT)
ret = ib_find_pkey(group->port->dev->device,
group->port->port_num,
be16_to_cpu(group->rec.pkey), &pkey_index);
spin_lock_irq(&group->lock); spin_lock_irq(&group->lock);
if (group->state == MCAST_PKEY_EVENT && !ret &&
group->pkey_index == pkey_index)
goto out;
while (!list_empty(&group->active_list)) { while (!list_empty(&group->active_list)) {
member = list_entry(group->active_list.next, member = list_entry(group->active_list.next,
struct mcast_member, list); struct mcast_member, list);
@@ -399,6 +419,7 @@ static void process_group_error(struct mcast_group *group)
} }
group->rec.join_state = 0; group->rec.join_state = 0;
out:
group->state = MCAST_BUSY; group->state = MCAST_BUSY;
spin_unlock_irq(&group->lock); spin_unlock_irq(&group->lock);
} }
@@ -415,9 +436,9 @@ static void mcast_work_handler(struct work_struct *work)
retest: retest:
spin_lock_irq(&group->lock); spin_lock_irq(&group->lock);
while (!list_empty(&group->pending_list) || while (!list_empty(&group->pending_list) ||
(group->state == MCAST_ERROR)) { (group->state != MCAST_BUSY)) {
if (group->state == MCAST_ERROR) { if (group->state != MCAST_BUSY) {
spin_unlock_irq(&group->lock); spin_unlock_irq(&group->lock);
process_group_error(group); process_group_error(group);
goto retest; goto retest;
@@ -494,12 +515,19 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
void *context) void *context)
{ {
struct mcast_group *group = context; struct mcast_group *group = context;
u16 pkey_index = MCAST_INVALID_PKEY_INDEX;
if (status) if (status)
process_join_error(group, status); process_join_error(group, status);
else { else {
ib_find_pkey(group->port->dev->device, group->port->port_num,
be16_to_cpu(rec->pkey), &pkey_index);
spin_lock_irq(&group->port->lock); spin_lock_irq(&group->port->lock);
group->rec = *rec; group->rec = *rec;
if (group->state == MCAST_BUSY &&
group->pkey_index == MCAST_INVALID_PKEY_INDEX)
group->pkey_index = pkey_index;
if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) { if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
rb_erase(&group->node, &group->port->table); rb_erase(&group->node, &group->port->table);
mcast_insert(group->port, group, 1); mcast_insert(group->port, group, 1);
@@ -539,6 +567,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
group->port = port; group->port = port;
group->rec.mgid = *mgid; group->rec.mgid = *mgid;
group->pkey_index = MCAST_INVALID_PKEY_INDEX;
INIT_LIST_HEAD(&group->pending_list); INIT_LIST_HEAD(&group->pending_list);
INIT_LIST_HEAD(&group->active_list); INIT_LIST_HEAD(&group->active_list);
INIT_WORK(&group->work, mcast_work_handler); INIT_WORK(&group->work, mcast_work_handler);
@@ -707,7 +736,8 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
} }
EXPORT_SYMBOL(ib_init_ah_from_mcmember); EXPORT_SYMBOL(ib_init_ah_from_mcmember);
static void mcast_groups_lost(struct mcast_port *port) static void mcast_groups_event(struct mcast_port *port,
enum mcast_group_state state)
{ {
struct mcast_group *group; struct mcast_group *group;
struct rb_node *node; struct rb_node *node;
@@ -721,7 +751,8 @@ static void mcast_groups_lost(struct mcast_port *port)
atomic_inc(&group->refcount); atomic_inc(&group->refcount);
queue_work(mcast_wq, &group->work); queue_work(mcast_wq, &group->work);
} }
group->state = MCAST_ERROR; if (group->state != MCAST_GROUP_ERROR)
group->state = state;
spin_unlock(&group->lock); spin_unlock(&group->lock);
} }
spin_unlock_irqrestore(&port->lock, flags); spin_unlock_irqrestore(&port->lock, flags);
@@ -731,16 +762,20 @@ static void mcast_event_handler(struct ib_event_handler *handler,
struct ib_event *event) struct ib_event *event)
{ {
struct mcast_device *dev; struct mcast_device *dev;
int index;
dev = container_of(handler, struct mcast_device, event_handler); dev = container_of(handler, struct mcast_device, event_handler);
index = event->element.port_num - dev->start_port;
switch (event->event) { switch (event->event) {
case IB_EVENT_PORT_ERR: case IB_EVENT_PORT_ERR:
case IB_EVENT_LID_CHANGE: case IB_EVENT_LID_CHANGE:
case IB_EVENT_SM_CHANGE: case IB_EVENT_SM_CHANGE:
case IB_EVENT_CLIENT_REREGISTER: case IB_EVENT_CLIENT_REREGISTER:
mcast_groups_lost(&dev->port[event->element.port_num - mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
dev->start_port]); break;
case IB_EVENT_PKEY_CHANGE:
mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
break; break;
default: default:
break; break;
......
@@ -59,7 +59,8 @@ extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
u8 node_type, int port_num); u8 node_type, int port_num);
/* /*
* Return 1 if the SMP should be handled by the local SMA/SM via process_mad * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
* via process_mad
*/ */
static inline enum smi_action smi_check_local_smp(struct ib_smp *smp, static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
struct ib_device *device) struct ib_device *device)
@@ -71,4 +72,19 @@ static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
(smp->hop_ptr == smp->hop_cnt + 1)) ? (smp->hop_ptr == smp->hop_cnt + 1)) ?
IB_SMI_HANDLE : IB_SMI_DISCARD); IB_SMI_HANDLE : IB_SMI_DISCARD);
} }
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
* via process_mad
*/
static inline enum smi_action smi_check_local_returning_smp(struct ib_smp *smp,
struct ib_device *device)
{
/* C14-13:3 -- We're at the end of the DR segment of path */
/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
return ((device->process_mad &&
ib_get_smp_direction(smp) &&
!smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
#endif /* __SMI_H_ */ #endif /* __SMI_H_ */
@@ -106,6 +106,9 @@ enum {
IB_UCM_MAX_DEVICES = 32 IB_UCM_MAX_DEVICES = 32
}; };
/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
extern struct class cm_class;
#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR) #define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
static void ib_ucm_add_one(struct ib_device *device); static void ib_ucm_add_one(struct ib_device *device);
@@ -1199,7 +1202,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
return 0; return 0;
} }
static void ib_ucm_release_class_dev(struct class_device *class_dev) static void ucm_release_class_dev(struct class_device *class_dev)
{ {
struct ib_ucm_device *dev; struct ib_ucm_device *dev;
@@ -1217,11 +1220,6 @@ static const struct file_operations ucm_fops = {
.poll = ib_ucm_poll, .poll = ib_ucm_poll,
}; };
static struct class ucm_class = {
.name = "infiniband_cm",
.release = ib_ucm_release_class_dev
};
static ssize_t show_ibdev(struct class_device *class_dev, char *buf) static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{ {
struct ib_ucm_device *dev; struct ib_ucm_device *dev;
@@ -1257,9 +1255,10 @@ static void ib_ucm_add_one(struct ib_device *device)
if (cdev_add(&ucm_dev->dev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1)) if (cdev_add(&ucm_dev->dev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1))
goto err; goto err;
ucm_dev->class_dev.class = &ucm_class; ucm_dev->class_dev.class = &cm_class;
ucm_dev->class_dev.dev = device->dma_device; ucm_dev->class_dev.dev = device->dma_device;
ucm_dev->class_dev.devt = ucm_dev->dev.dev; ucm_dev->class_dev.devt = ucm_dev->dev.dev;
ucm_dev->class_dev.release = ucm_release_class_dev;
snprintf(ucm_dev->class_dev.class_id, BUS_ID_SIZE, "ucm%d", snprintf(ucm_dev->class_dev.class_id, BUS_ID_SIZE, "ucm%d",
ucm_dev->devnum); ucm_dev->devnum);
if (class_device_register(&ucm_dev->class_dev)) if (class_device_register(&ucm_dev->class_dev))
@@ -1306,40 +1305,34 @@ static int __init ib_ucm_init(void)
"infiniband_cm"); "infiniband_cm");
if (ret) { if (ret) {
printk(KERN_ERR "ucm: couldn't register device number\n"); printk(KERN_ERR "ucm: couldn't register device number\n");
goto err; goto error1;
} }
ret = class_register(&ucm_class); ret = class_create_file(&cm_class, &class_attr_abi_version);
if (ret) {
printk(KERN_ERR "ucm: couldn't create class infiniband_cm\n");
goto err_chrdev;
}
ret = class_create_file(&ucm_class, &class_attr_abi_version);
if (ret) { if (ret) {
printk(KERN_ERR "ucm: couldn't create abi_version attribute\n"); printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
goto err_class; goto error2;
} }
ret = ib_register_client(&ucm_client); ret = ib_register_client(&ucm_client);
if (ret) { if (ret) {
printk(KERN_ERR "ucm: couldn't register client\n"); printk(KERN_ERR "ucm: couldn't register client\n");
goto err_class; goto error3;
} }
return 0; return 0;
err_class: error3:
class_unregister(&ucm_class); class_remove_file(&cm_class, &class_attr_abi_version);
err_chrdev: error2:
unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES); unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
err: error1:
return ret; return ret;
} }
static void __exit ib_ucm_cleanup(void) static void __exit ib_ucm_cleanup(void)
{ {
ib_unregister_client(&ucm_client); ib_unregister_client(&ucm_client);
class_unregister(&ucm_class); class_remove_file(&cm_class, &class_attr_abi_version);
unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES); unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
idr_destroy(&ctx_id_table); idr_destroy(&ctx_id_table);
} }
......
@@ -31,6 +31,7 @@
*/ */
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/poll.h> #include <linux/poll.h>
#include <linux/idr.h> #include <linux/idr.h>
@@ -991,6 +992,96 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
return ret; return ret;
} }
static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
/* Acquire mutex's based on pointer comparison to prevent deadlock. */
if (file1 < file2) {
mutex_lock(&file1->mut);
mutex_lock(&file2->mut);
} else {
mutex_lock(&file2->mut);
mutex_lock(&file1->mut);
}
}
static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
if (file1 < file2) {
mutex_unlock(&file2->mut);
mutex_unlock(&file1->mut);
} else {
mutex_unlock(&file1->mut);
mutex_unlock(&file2->mut);
}
}
static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
struct ucma_event *uevent, *tmp;
list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
if (uevent->ctx == ctx)
list_move_tail(&uevent->list, &file->event_list);
}
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
const char __user *inbuf,
int in_len, int out_len)
{
struct rdma_ucm_migrate_id cmd;
struct rdma_ucm_migrate_resp resp;
struct ucma_context *ctx;
struct file *filp;
struct ucma_file *cur_file;
int ret = 0;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
/* Get current fd to protect against it being closed */
filp = fget(cmd.fd);
if (!filp)
return -ENOENT;
/* Validate current fd and prevent destruction of id. */
ctx = ucma_get_ctx(filp->private_data, cmd.id);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto file_put;
}
cur_file = ctx->file;
if (cur_file == new_file) {
resp.events_reported = ctx->events_reported;
goto response;
}
/*
* Migrate events between fd's, maintaining order, and avoiding new
* events being added before existing events.
*/
ucma_lock_files(cur_file, new_file);
mutex_lock(&mut);
list_move_tail(&ctx->list, &new_file->ctx_list);
ucma_move_events(ctx, new_file);
ctx->file = new_file;
resp.events_reported = ctx->events_reported;
mutex_unlock(&mut);
ucma_unlock_files(cur_file, new_file);
response:
if (copy_to_user((void __user *)(unsigned long)cmd.response,
&resp, sizeof(resp)))
ret = -EFAULT;
ucma_put_ctx(ctx);
file_put:
fput(filp);
return ret;
}
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file, static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
const char __user *inbuf, const char __user *inbuf,
int in_len, int out_len) = { int in_len, int out_len) = {
@@ -1012,6 +1103,7 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
[RDMA_USER_CM_CMD_NOTIFY] = ucma_notify, [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify,
[RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast, [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast,
[RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast, [RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast,
[RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id
}; };
static ssize_t ucma_write(struct file *filp, const char __user *buf, static ssize_t ucma_write(struct file *filp, const char __user *buf,
......
@@ -179,7 +179,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
setup.size = 1UL << cq->size_log2; setup.size = 1UL << cq->size_log2;
setup.credits = 65535; setup.credits = 65535;
setup.credit_thres = 1; setup.credit_thres = 1;
if (rdev_p->t3cdev_p->type == T3B) if (rdev_p->t3cdev_p->type != T3A)
setup.ovfl_mode = 0; setup.ovfl_mode = 0;
else else
setup.ovfl_mode = 1; setup.ovfl_mode = 1;
@@ -584,7 +584,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
{ {
u32 i, nr_wqe, copy_len; u32 i, nr_wqe, copy_len;
u8 *copy_data; u8 *copy_data;
u8 wr_len, utx_len; /* lenght in 8 byte flit */ u8 wr_len, utx_len; /* length in 8 byte flit */
enum t3_wr_flags flag; enum t3_wr_flags flag;
__be64 *wqe; __be64 *wqe;
u64 utx_cmd; u64 utx_cmd;
......
@@ -315,7 +315,7 @@ struct t3_rdma_init_wr {
__be32 ird; __be32 ird;
__be64 qp_dma_addr; /* 7 */ __be64 qp_dma_addr; /* 7 */
__be32 qp_dma_size; /* 8 */ __be32 qp_dma_size; /* 8 */
u32 irs; __be32 irs;
}; };
struct t3_genbit { struct t3_genbit {
@@ -324,7 +324,8 @@ struct t3_genbit {
}; };
enum rdma_init_wr_flags { enum rdma_init_wr_flags {
RECVS_POSTED = 1, RECVS_POSTED = (1<<0),
PRIV_QP = (1<<1),
}; };
union t3_wr { union t3_wr {
......
@@ -1118,7 +1118,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
status2errno(rpl->status)); status2errno(rpl->status));
connect_reply_upcall(ep, status2errno(rpl->status)); connect_reply_upcall(ep, status2errno(rpl->status));
state_set(&ep->com, DEAD); state_set(&ep->com, DEAD);
if (ep->com.tdev->type == T3B && act_open_has_tid(rpl->status)) if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
release_tid(ep->com.tdev, GET_TID(rpl), NULL); release_tid(ep->com.tdev, GET_TID(rpl), NULL);
cxgb3_free_atid(ep->com.tdev, ep->atid); cxgb3_free_atid(ep->com.tdev, ep->atid);
dst_release(ep->dst); dst_release(ep->dst);
@@ -1249,7 +1249,7 @@ static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
skb_trim(skb, sizeof(struct cpl_tid_release)); skb_trim(skb, sizeof(struct cpl_tid_release));
skb_get(skb); skb_get(skb);
if (tdev->type == T3B) if (tdev->type != T3A)
release_tid(tdev, hwtid, skb); release_tid(tdev, hwtid, skb);
else { else {
struct cpl_pass_accept_rpl *rpl; struct cpl_pass_accept_rpl *rpl;
......
@@ -122,6 +122,13 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list,
*total_size += buffer_list[i].size; *total_size += buffer_list[i].size;
if (i > 0) if (i > 0)
mask |= buffer_list[i].addr; mask |= buffer_list[i].addr;
else
mask |= buffer_list[i].addr & PAGE_MASK;
if (i != num_phys_buf - 1)
mask |= buffer_list[i].addr + buffer_list[i].size;
else
mask |= (buffer_list[i].addr + buffer_list[i].size +
PAGE_SIZE - 1) & PAGE_MASK;
} }
if (*total_size > 0xFFFFFFFFULL) if (*total_size > 0xFFFFFFFFULL)
......
@@ -39,6 +39,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/irq.h> #include <asm/irq.h>
@@ -645,7 +646,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (err) if (err)
goto err; goto err;
if (udata && t3b_device(rhp)) { if (udata && !t3a_device(rhp)) {
uresp.pbl_addr = (mhp->attr.pbl_addr - uresp.pbl_addr = (mhp->attr.pbl_addr -
rhp->rdev.rnic_info.pbl_base) >> 3; rhp->rdev.rnic_info.pbl_base) >> 3;
PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__, PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,
@@ -1053,7 +1054,9 @@ static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
struct net_device *lldev = dev->rdev.t3cdev_p->lldev; struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev); PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
rtnl_lock();
lldev->ethtool_ops->get_drvinfo(lldev, &info); lldev->ethtool_ops->get_drvinfo(lldev, &info);
rtnl_unlock();
return sprintf(buf, "%s\n", info.fw_version); return sprintf(buf, "%s\n", info.fw_version);
} }
@@ -1065,7 +1068,9 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
struct net_device *lldev = dev->rdev.t3cdev_p->lldev; struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev); PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
rtnl_lock();
lldev->ethtool_ops->get_drvinfo(lldev, &info); lldev->ethtool_ops->get_drvinfo(lldev, &info);
rtnl_unlock();
return sprintf(buf, "%s\n", info.driver); return sprintf(buf, "%s\n", info.driver);
} }
......
@@ -208,36 +208,19 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe, static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe,
struct ib_recv_wr *wr) struct ib_recv_wr *wr)
{ {
int i, err = 0; int i;
u32 pbl_addr[4];
u8 page_size[4];
if (wr->num_sge > T3_MAX_SGE) if (wr->num_sge > T3_MAX_SGE)
return -EINVAL; return -EINVAL;
err = iwch_sgl2pbl_map(rhp, wr->sg_list, wr->num_sge, pbl_addr,
page_size);
if (err)
return err;
wqe->recv.pagesz[0] = page_size[0];
wqe->recv.pagesz[1] = page_size[1];
wqe->recv.pagesz[2] = page_size[2];
wqe->recv.pagesz[3] = page_size[3];
wqe->recv.num_sgle = cpu_to_be32(wr->num_sge); wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
for (i = 0; i < wr->num_sge; i++) { for (i = 0; i < wr->num_sge; i++) {
wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey); wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length); wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
/* to in the WQE == the offset into the page */
wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
(1UL << (12 + page_size[i])));
/* pbl_addr is the adapters address in the PBL */
wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
} }
for (; i < T3_MAX_SGE; i++) { for (; i < T3_MAX_SGE; i++) {
wqe->recv.sgl[i].stag = 0; wqe->recv.sgl[i].stag = 0;
wqe->recv.sgl[i].len = 0; wqe->recv.sgl[i].len = 0;
wqe->recv.sgl[i].to = 0; wqe->recv.sgl[i].to = 0;
wqe->recv.pbl_addr[i] = 0;
} }
return 0; return 0;
} }
@@ -659,6 +642,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
cxio_flush_rq(&qhp->wq, &rchp->cq, count); cxio_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock); spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&rchp->lock, *flag); spin_unlock_irqrestore(&rchp->lock, *flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
/* locking heirarchy: cq lock first, then qp lock. */ /* locking heirarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&schp->lock, *flag); spin_lock_irqsave(&schp->lock, *flag);
@@ -668,6 +652,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
cxio_flush_sq(&qhp->wq, &schp->cq, count); cxio_flush_sq(&qhp->wq, &schp->cq, count);
spin_unlock(&qhp->lock); spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&schp->lock, *flag); spin_unlock_irqrestore(&schp->lock, *flag);
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
/* deref */ /* deref */
if (atomic_dec_and_test(&qhp->refcnt)) if (atomic_dec_and_test(&qhp->refcnt))
@@ -678,7 +663,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
static void flush_qp(struct iwch_qp *qhp, unsigned long *flag) static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{ {
if (t3b_device(qhp->rhp)) if (qhp->ibqp.uobject)
cxio_set_wq_in_error(&qhp->wq); cxio_set_wq_in_error(&qhp->wq);
else else
__flush_qp(qhp, flag); __flush_qp(qhp, flag);
@@ -732,6 +717,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
init_attr.qp_dma_addr = qhp->wq.dma_addr; init_attr.qp_dma_addr = qhp->wq.dma_addr;
init_attr.qp_dma_size = (1UL << qhp->wq.size_log2); init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0; init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
init_attr.irs = qhp->ep->rcv_seq; init_attr.irs = qhp->ep->rcv_seq;
PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d " PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
"flags 0x%x qpcaps 0x%x\n", __FUNCTION__, "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
@@ -847,10 +833,11 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
disconnect = 1; disconnect = 1;
ep = qhp->ep; ep = qhp->ep;
} }
flush_qp(qhp, &flag);
break; break;
case IWCH_QP_STATE_TERMINATE: case IWCH_QP_STATE_TERMINATE:
qhp->attr.state = IWCH_QP_STATE_TERMINATE; qhp->attr.state = IWCH_QP_STATE_TERMINATE;
if (t3b_device(qhp->rhp)) if (qhp->ibqp.uobject)
cxio_set_wq_in_error(&qhp->wq); cxio_set_wq_in_error(&qhp->wq);
if (!internal) if (!internal)
terminate = 1; terminate = 1;
......
/* /*
* IBM eServer eHCA Infiniband device driver for Linux on POWER * IBM eServer eHCA Infiniband device driver for Linux on POWER
* *
* adress vector functions * address vector functions
* *
* Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com> * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Khadija Souissi <souissik@de.ibm.com> * Khadija Souissi <souissik@de.ibm.com>
......
@@ -94,7 +94,11 @@ struct ehca_sma_attr {
struct ehca_sport { struct ehca_sport {
struct ib_cq *ibcq_aqp1; struct ib_cq *ibcq_aqp1;
struct ib_qp *ibqp_aqp1; struct ib_qp *ibqp_sqp[2];
/* lock to serialze modify_qp() calls for sqp in normal
* and irq path (when event PORT_ACTIVE is received first time)
*/
spinlock_t mod_sqp_lock;
enum ib_port_state port_state; enum ib_port_state port_state;
struct ehca_sma_attr saved_attr; struct ehca_sma_attr saved_attr;
}; };
@@ -141,6 +145,14 @@ enum ehca_ext_qp_type {
EQPT_SRQ = 3, EQPT_SRQ = 3,
}; };
/* struct to cache modify_qp()'s parms for GSI/SMI qp */
struct ehca_mod_qp_parm {
int mask;
struct ib_qp_attr attr;
};
#define EHCA_MOD_QP_PARM_MAX 4
struct ehca_qp { struct ehca_qp {
union { union {
struct ib_qp ib_qp; struct ib_qp ib_qp;
@@ -164,10 +176,18 @@ struct ehca_qp {
struct ehca_cq *recv_cq; struct ehca_cq *recv_cq;
unsigned int sqerr_purgeflag; unsigned int sqerr_purgeflag;
struct hlist_node list_entries; struct hlist_node list_entries;
/* array to cache modify_qp()'s parms for GSI/SMI qp */
struct ehca_mod_qp_parm *mod_qp_parm;
int mod_qp_parm_idx;
/* mmap counter for resources mapped into user space */ /* mmap counter for resources mapped into user space */
u32 mm_count_squeue; u32 mm_count_squeue;
u32 mm_count_rqueue; u32 mm_count_rqueue;
u32 mm_count_galpa; u32 mm_count_galpa;
/* unsolicited ack circumvention */
int unsol_ack_circ;
int mtu_shift;
u32 message_count;
u32 packet_count;
}; };
#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ) #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
@@ -323,6 +343,7 @@ extern int ehca_port_act_time;
extern int ehca_use_hp_mr; extern int ehca_use_hp_mr;
extern int ehca_scaling_code; extern int ehca_scaling_code;
extern int ehca_lock_hcalls; extern int ehca_lock_hcalls;
extern int ehca_nr_ports;
struct ipzu_queue_resp { struct ipzu_queue_resp {
u32 qe_size; /* queue entry size */ u32 qe_size; /* queue entry size */
......
@@ -246,7 +246,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
} else { } else {
if (h_ret != H_PAGE_REGISTERED) { if (h_ret != H_PAGE_REGISTERED) {
ehca_err(device, "Registration of page failed " ehca_err(device, "Registration of page failed "
"ehca_cq=%p cq_num=%x h_ret=%li" "ehca_cq=%p cq_num=%x h_ret=%li "
"counter=%i act_pages=%i", "counter=%i act_pages=%i",
my_cq, my_cq->cq_number, my_cq, my_cq->cq_number,
h_ret, counter, param.act_pages); h_ret, counter, param.act_pages);
......
@@ -62,6 +62,7 @@
#define NEQE_PORT_NUMBER EHCA_BMASK_IBM( 8, 15) #define NEQE_PORT_NUMBER EHCA_BMASK_IBM( 8, 15)
#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16) #define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16, 16) #define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16, 16)
#define NEQE_SPECIFIC_EVENT EHCA_BMASK_IBM(16, 23)
#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52, 63) #define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE EHCA_BMASK_IBM( 0, 7) #define ERROR_DATA_TYPE EHCA_BMASK_IBM( 0, 7)
@@ -354,17 +355,34 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
{ {
u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe); u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe); u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
u8 spec_event;
struct ehca_sport *sport = &shca->sport[port - 1];
unsigned long flags;
switch (ec) { switch (ec) {
case 0x30: /* port availability change */ case 0x30: /* port availability change */
if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) { if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
shca->sport[port - 1].port_state = IB_PORT_ACTIVE; int suppress_event;
/* replay modify_qp for sqps */
spin_lock_irqsave(&sport->mod_sqp_lock, flags);
suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
if (sport->ibqp_sqp[IB_QPT_SMI])
ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
if (!suppress_event)
ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
/* AQP1 was destroyed, ignore this event */
if (suppress_event)
break;
sport->port_state = IB_PORT_ACTIVE;
dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE, dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
"is active"); "is active");
ehca_query_sma_attr(shca, port, ehca_query_sma_attr(shca, port,
&shca->sport[port - 1].saved_attr); &sport->saved_attr);
} else { } else {
shca->sport[port - 1].port_state = IB_PORT_DOWN; sport->port_state = IB_PORT_DOWN;
dispatch_port_event(shca, port, IB_EVENT_PORT_ERR, dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
"is inactive"); "is inactive");
} }
@@ -378,11 +396,11 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
ehca_warn(&shca->ib_device, "disruptive port " ehca_warn(&shca->ib_device, "disruptive port "
"%d configuration change", port); "%d configuration change", port);
shca->sport[port - 1].port_state = IB_PORT_DOWN; sport->port_state = IB_PORT_DOWN;
dispatch_port_event(shca, port, IB_EVENT_PORT_ERR, dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
"is inactive"); "is inactive");
shca->sport[port - 1].port_state = IB_PORT_ACTIVE; sport->port_state = IB_PORT_ACTIVE;
dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE, dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
"is active"); "is active");
} else } else
@@ -394,6 +412,16 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
case 0x33: /* trace stopped */ case 0x33: /* trace stopped */
ehca_err(&shca->ib_device, "Traced stopped."); ehca_err(&shca->ib_device, "Traced stopped.");
break; break;
case 0x34: /* util async event */
spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
if (spec_event == 0x80) /* client reregister required */
dispatch_port_event(shca, port,
IB_EVENT_CLIENT_REREGISTER,
"client reregister req.");
else
ehca_warn(&shca->ib_device, "Unknown util async "
"event %x on port %x", spec_event, port);
break;
default: default:
ehca_err(&shca->ib_device, "Unknown event code: %x on %s.", ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
ec, shca->ib_device.name); ec, shca->ib_device.name);
......
@@ -200,4 +200,6 @@ void ehca_free_fw_ctrlblock(void *ptr);
#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr)) #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
#endif #endif
void ehca_recover_sqp(struct ib_qp *sqp);
#endif #endif
@@ -90,7 +90,8 @@ MODULE_PARM_DESC(hw_level,
"hardware level" "hardware level"
" (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)"); " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
MODULE_PARM_DESC(nr_ports, MODULE_PARM_DESC(nr_ports,
"number of connected ports (default: 2)"); "number of connected ports (-1: autodetect, 1: port one only, "
"2: two ports (default)");
MODULE_PARM_DESC(use_hp_mr, MODULE_PARM_DESC(use_hp_mr,
"high performance MRs (0: no (default), 1: yes)"); "high performance MRs (0: no (default), 1: yes)");
MODULE_PARM_DESC(port_act_time, MODULE_PARM_DESC(port_act_time,
@@ -511,7 +512,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
} }
sport->ibcq_aqp1 = ibcq; sport->ibcq_aqp1 = ibcq;
if (sport->ibqp_aqp1) { if (sport->ibqp_sqp[IB_QPT_GSI]) {
ehca_err(&shca->ib_device, "AQP1 QP is already created."); ehca_err(&shca->ib_device, "AQP1 QP is already created.");
ret = -EPERM; ret = -EPERM;
goto create_aqp1; goto create_aqp1;
@@ -537,7 +538,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
ret = PTR_ERR(ibqp); ret = PTR_ERR(ibqp);
goto create_aqp1; goto create_aqp1;
} }
sport->ibqp_aqp1 = ibqp; sport->ibqp_sqp[IB_QPT_GSI] = ibqp;
return 0; return 0;
@@ -550,7 +551,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)
{ {
int ret; int ret;
ret = ib_destroy_qp(sport->ibqp_aqp1); ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
if (ret) { if (ret) {
ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret); ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
return ret; return ret;
@@ -693,7 +694,7 @@ static int __devinit ehca_probe(struct of_device *dev,
struct ehca_shca *shca; struct ehca_shca *shca;
const u64 *handle; const u64 *handle;
struct ib_pd *ibpd; struct ib_pd *ibpd;
int ret; int ret, i;
handle = of_get_property(dev->node, "ibm,hca-handle", NULL); handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
if (!handle) { if (!handle) {
@@ -714,6 +715,8 @@ static int __devinit ehca_probe(struct of_device *dev,
return -ENOMEM; return -ENOMEM;
} }
mutex_init(&shca->modify_mutex); mutex_init(&shca->modify_mutex);
for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
spin_lock_init(&shca->sport[i].mod_sqp_lock);
shca->ofdev = dev; shca->ofdev = dev;
shca->ipz_hca_handle.handle = *handle; shca->ipz_hca_handle.handle = *handle;
@@ -934,7 +937,7 @@ void ehca_poll_eqs(unsigned long data)
ehca_process_eq(shca, 0); ehca_process_eq(shca, 0);
} }
} }
mod_timer(&poll_eqs_timer, jiffies + HZ); mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
spin_unlock(&shca_list_lock); spin_unlock(&shca_list_lock);
} }
......
@@ -50,6 +50,9 @@
#include "hcp_if.h" #include "hcp_if.h"
#include "hipz_fns.h" #include "hipz_fns.h"
/* in RC traffic, insert an empty RDMA READ every this many packets */
#define ACK_CIRC_THRESHOLD 2000000
static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue, static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
struct ehca_wqe *wqe_p, struct ehca_wqe *wqe_p,
struct ib_recv_wr *recv_wr) struct ib_recv_wr *recv_wr)
@@ -81,7 +84,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
if (ehca_debug_level) { if (ehca_debug_level) {
ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
ipz_rqueue); ipz_rqueue);
ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
} }
return 0; return 0;
@@ -135,7 +138,8 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
static inline int ehca_write_swqe(struct ehca_qp *qp, static inline int ehca_write_swqe(struct ehca_qp *qp,
struct ehca_wqe *wqe_p, struct ehca_wqe *wqe_p,
const struct ib_send_wr *send_wr) const struct ib_send_wr *send_wr,
int hidden)
{ {
u32 idx; u32 idx;
u64 dma_length; u64 dma_length;
@@ -176,7 +180,9 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
wqe_p->wr_flag = 0; wqe_p->wr_flag = 0;
if (send_wr->send_flags & IB_SEND_SIGNALED) if ((send_wr->send_flags & IB_SEND_SIGNALED ||
qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
&& !hidden)
wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM; wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
if (send_wr->opcode == IB_WR_SEND_WITH_IMM || if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
@@ -199,7 +205,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8; wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
wqe_p->local_ee_context_qkey = remote_qkey; wqe_p->local_ee_context_qkey = remote_qkey;
if (!send_wr->wr.ud.ah) { if (unlikely(!send_wr->wr.ud.ah)) {
ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp); ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
return -EINVAL; return -EINVAL;
} }
@@ -255,6 +261,15 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
} /* eof idx */ } /* eof idx */
wqe_p->u.nud.atomic_1st_op_dma_len = dma_length; wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
/* unsolicited ack circumvention */
if (send_wr->opcode == IB_WR_RDMA_READ) {
/* on RDMA read, switch on and reset counters */
qp->message_count = qp->packet_count = 0;
qp->unsol_ack_circ = 1;
} else
/* else estimate #packets */
qp->packet_count += (dma_length >> qp->mtu_shift) + 1;
break; break;
default: default:
...@@ -355,13 +370,49 @@ static inline void map_ib_wc_status(u32 cqe_status, ...@@ -355,13 +370,49 @@ static inline void map_ib_wc_status(u32 cqe_status,
*wc_status = IB_WC_SUCCESS; *wc_status = IB_WC_SUCCESS;
} }
static inline int post_one_send(struct ehca_qp *my_qp,
struct ib_send_wr *cur_send_wr,
struct ib_send_wr **bad_send_wr,
int hidden)
{
struct ehca_wqe *wqe_p;
int ret;
u64 start_offset = my_qp->ipz_squeue.current_q_offset;
/* get pointer next to free WQE */
wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
if (unlikely(!wqe_p)) {
/* too many posted work requests: queue overflow */
if (bad_send_wr)
*bad_send_wr = cur_send_wr;
ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
"qp_num=%x", my_qp->ib_qp.qp_num);
return -ENOMEM;
}
/* write a SEND WQE into the QUEUE */
ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden);
/*
* if something failed,
* reset the free entry pointer to the start value
*/
if (unlikely(ret)) {
my_qp->ipz_squeue.current_q_offset = start_offset;
if (bad_send_wr)
*bad_send_wr = cur_send_wr;
ehca_err(my_qp->ib_qp.device, "Could not write WQE "
"qp_num=%x", my_qp->ib_qp.qp_num);
return -EINVAL;
}
return 0;
}
int ehca_post_send(struct ib_qp *qp, int ehca_post_send(struct ib_qp *qp,
struct ib_send_wr *send_wr, struct ib_send_wr *send_wr,
struct ib_send_wr **bad_send_wr) struct ib_send_wr **bad_send_wr)
{ {
struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
struct ib_send_wr *cur_send_wr; struct ib_send_wr *cur_send_wr;
struct ehca_wqe *wqe_p;
int wqe_cnt = 0; int wqe_cnt = 0;
int ret = 0; int ret = 0;
unsigned long flags; unsigned long flags;
...@@ -369,37 +420,33 @@ int ehca_post_send(struct ib_qp *qp, ...@@ -369,37 +420,33 @@ int ehca_post_send(struct ib_qp *qp,
/* LOCK the QUEUE */ /* LOCK the QUEUE */
spin_lock_irqsave(&my_qp->spinlock_s, flags); spin_lock_irqsave(&my_qp->spinlock_s, flags);
/* Send an empty extra RDMA read if:
* 1) there has been an RDMA read on this connection before
* 2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
* 3) we can be sure that any previous extra RDMA read has been
* processed so we don't overflow the SQ
*/
if (unlikely(my_qp->unsol_ack_circ &&
my_qp->packet_count > ACK_CIRC_THRESHOLD &&
my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
/* insert an empty RDMA READ to fix up the remote QP state */
struct ib_send_wr circ_wr;
memset(&circ_wr, 0, sizeof(circ_wr));
circ_wr.opcode = IB_WR_RDMA_READ;
post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
wqe_cnt++;
ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
my_qp->message_count = my_qp->packet_count = 0;
}
/* loop processes list of send reqs */ /* loop processes list of send reqs */
for (cur_send_wr = send_wr; cur_send_wr != NULL; for (cur_send_wr = send_wr; cur_send_wr != NULL;
cur_send_wr = cur_send_wr->next) { cur_send_wr = cur_send_wr->next) {
u64 start_offset = my_qp->ipz_squeue.current_q_offset; ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
/* get pointer next to free WQE */
wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
if (unlikely(!wqe_p)) {
/* too many posted work requests: queue overflow */
if (bad_send_wr)
*bad_send_wr = cur_send_wr;
if (wqe_cnt == 0) {
ret = -ENOMEM;
ehca_err(qp->device, "Too many posted WQEs "
"qp_num=%x", qp->qp_num);
}
goto post_send_exit0;
}
/* write a SEND WQE into the QUEUE */
ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
/*
* if something failed,
* reset the free entry pointer to the start value
*/
if (unlikely(ret)) { if (unlikely(ret)) {
my_qp->ipz_squeue.current_q_offset = start_offset; /* if one or more WQEs were successful, don't fail */
*bad_send_wr = cur_send_wr; if (wqe_cnt)
if (wqe_cnt == 0) { ret = 0;
ret = -EINVAL;
ehca_err(qp->device, "Could not write WQE "
"qp_num=%x", qp->qp_num);
}
goto post_send_exit0; goto post_send_exit0;
} }
wqe_cnt++; wqe_cnt++;
...@@ -410,6 +457,7 @@ int ehca_post_send(struct ib_qp *qp, ...@@ -410,6 +457,7 @@ int ehca_post_send(struct ib_qp *qp,
post_send_exit0: post_send_exit0:
iosync(); /* serialize GAL register access */ iosync(); /* serialize GAL register access */
hipz_update_sqa(my_qp, wqe_cnt); hipz_update_sqa(my_qp, wqe_cnt);
my_qp->message_count += wqe_cnt;
spin_unlock_irqrestore(&my_qp->spinlock_s, flags); spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
return ret; return ret;
} }
......
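Editorial sketch of the "unsolicited ack circumvention" added in the ehca_reqs.c hunks above, reduced to the bare decision: once an RDMA READ has been seen on the connection, an empty RDMA READ is slipped in after roughly ACK_CIRC_THRESHOLD link packets and more messages than the send queue can hold have gone by. The struct below is a simplified stand-in for the driver's ehca_qp fields, not the driver's own type:

#define ACK_CIRC_THRESHOLD 2000000	/* matches the define in the diff */

struct circ_state {
	int unsol_ack_circ;		/* an RDMA READ was posted earlier */
	unsigned int packet_count;	/* estimated link packets since then */
	unsigned int message_count;	/* WQEs posted since the last reset */
	unsigned int max_send_wr;	/* send queue depth from the QP init attrs */
};

/* returns nonzero when an empty RDMA READ should be inserted */
static int need_circ_rdma_read(const struct circ_state *s)
{
	return s->unsol_ack_circ &&
	       s->packet_count > ACK_CIRC_THRESHOLD &&
	       s->message_count > s->max_send_wr;
}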
...@@ -40,11 +40,8 @@ ...@@ -40,11 +40,8 @@
*/ */
#include <linux/module.h>
#include <linux/err.h>
#include "ehca_classes.h" #include "ehca_classes.h"
#include "ehca_tools.h" #include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h" #include "ehca_iverbs.h"
#include "hcp_if.h" #include "hcp_if.h"
...@@ -93,6 +90,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca, ...@@ -93,6 +90,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
return H_PARAMETER; return H_PARAMETER;
} }
if (ehca_nr_ports < 0) /* autodetect mode */
return H_SUCCESS;
for (counter = 0; for (counter = 0;
shca->sport[port - 1].port_state != IB_PORT_ACTIVE && shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
counter < ehca_port_act_time; counter < ehca_port_act_time;
......
...@@ -81,6 +81,16 @@ ...@@ -81,6 +81,16 @@
#define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */ #define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */
#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */ #define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */
/*
* These 3 values (SDR and DDR may be ORed for auto-speed
* negotiation) are used for the 3rd argument to path_f_set_ib_cfg
* with cmd IPATH_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
* are also the possible values for ipath_link_speed_enabled and active
* The values were chosen to match values used within the IB spec.
*/
#define IPATH_IB_SDR 1
#define IPATH_IB_DDR 2
/* /*
* stats maintained by the driver. For now, at least, this is global * stats maintained by the driver. For now, at least, this is global
* to all minor devices. * to all minor devices.
...@@ -433,8 +443,9 @@ struct ipath_user_info { ...@@ -433,8 +443,9 @@ struct ipath_user_info {
#define IPATH_CMD_UNUSED_2 26 #define IPATH_CMD_UNUSED_2 26
#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */ #define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */ #define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */
#define IPATH_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
#define IPATH_CMD_MAX 28 #define IPATH_CMD_MAX 29
/* /*
* Poll types * Poll types
...@@ -477,6 +488,8 @@ struct ipath_cmd { ...@@ -477,6 +488,8 @@ struct ipath_cmd {
__u64 port_info; __u64 port_info;
/* enable/disable receipt of packets */ /* enable/disable receipt of packets */
__u32 recv_ctrl; __u32 recv_ctrl;
/* enable/disable armlaunch errors (non-zero to enable) */
__u32 armlaunch_ctrl;
/* partition key to set */ /* partition key to set */
__u16 part_key; __u16 part_key;
/* user address of __u32 bitmask of active slaves */ /* user address of __u32 bitmask of active slaves */
...@@ -579,7 +592,7 @@ struct ipath_flash { ...@@ -579,7 +592,7 @@ struct ipath_flash {
struct infinipath_counters { struct infinipath_counters {
__u64 LBIntCnt; __u64 LBIntCnt;
__u64 LBFlowStallCnt; __u64 LBFlowStallCnt;
__u64 Reserved1; __u64 TxSDmaDescCnt; /* was Reserved1 */
__u64 TxUnsupVLErrCnt; __u64 TxUnsupVLErrCnt;
__u64 TxDataPktCnt; __u64 TxDataPktCnt;
__u64 TxFlowPktCnt; __u64 TxFlowPktCnt;
...@@ -615,12 +628,26 @@ struct infinipath_counters { ...@@ -615,12 +628,26 @@ struct infinipath_counters {
__u64 RxP6HdrEgrOvflCnt; __u64 RxP6HdrEgrOvflCnt;
__u64 RxP7HdrEgrOvflCnt; __u64 RxP7HdrEgrOvflCnt;
__u64 RxP8HdrEgrOvflCnt; __u64 RxP8HdrEgrOvflCnt;
__u64 Reserved6; __u64 RxP9HdrEgrOvflCnt; /* was Reserved6 */
__u64 Reserved7; __u64 RxP10HdrEgrOvflCnt; /* was Reserved7 */
__u64 RxP11HdrEgrOvflCnt; /* new for IBA7220 */
__u64 RxP12HdrEgrOvflCnt; /* new for IBA7220 */
__u64 RxP13HdrEgrOvflCnt; /* new for IBA7220 */
__u64 RxP14HdrEgrOvflCnt; /* new for IBA7220 */
__u64 RxP15HdrEgrOvflCnt; /* new for IBA7220 */
__u64 RxP16HdrEgrOvflCnt; /* new for IBA7220 */
__u64 IBStatusChangeCnt; __u64 IBStatusChangeCnt;
__u64 IBLinkErrRecoveryCnt; __u64 IBLinkErrRecoveryCnt;
__u64 IBLinkDownedCnt; __u64 IBLinkDownedCnt;
__u64 IBSymbolErrCnt; __u64 IBSymbolErrCnt;
/* The following are new for IBA7220 */
__u64 RxVL15DroppedPktCnt;
__u64 RxOtherLocalPhyErrCnt;
__u64 PcieRetryBufDiagQwordCnt;
__u64 ExcessBufferOvflCnt;
__u64 LocalLinkIntegrityErrCnt;
__u64 RxVlErrCnt;
__u64 RxDlidFltrCnt;
}; };
/* /*
......
...@@ -421,7 +421,7 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) ...@@ -421,7 +421,7 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
else else
n = head - tail; n = head - tail;
if (unlikely((u32)cqe < n)) { if (unlikely((u32)cqe < n)) {
ret = -EOVERFLOW; ret = -EINVAL;
goto bail_unlock; goto bail_unlock;
} }
for (n = 0; tail != head; n++) { for (n = 0; tail != head; n++) {
......
...@@ -55,7 +55,7 @@ ...@@ -55,7 +55,7 @@
#define __IPATH_PKTDBG 0x80 /* print packet data */ #define __IPATH_PKTDBG 0x80 /* print packet data */
/* print process startup (init)/exit messages */ /* print process startup (init)/exit messages */
#define __IPATH_PROCDBG 0x100 #define __IPATH_PROCDBG 0x100
/* print mmap/nopage stuff, not using VDBG any more */ /* print mmap/fault stuff, not using VDBG any more */
#define __IPATH_MMDBG 0x200 #define __IPATH_MMDBG 0x200
#define __IPATH_ERRPKTDBG 0x400 #define __IPATH_ERRPKTDBG 0x400
#define __IPATH_USER_SEND 0x1000 /* use user mode send */ #define __IPATH_USER_SEND 0x1000 /* use user mode send */
...@@ -81,7 +81,7 @@ ...@@ -81,7 +81,7 @@
#define __IPATH_VERBDBG 0x0 /* very verbose debug */ #define __IPATH_VERBDBG 0x0 /* very verbose debug */
#define __IPATH_PKTDBG 0x0 /* print packet data */ #define __IPATH_PKTDBG 0x0 /* print packet data */
#define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */ #define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */
/* print mmap/nopage stuff, not using VDBG any more */ /* print mmap/fault stuff, not using VDBG any more */
#define __IPATH_MMDBG 0x0 #define __IPATH_MMDBG 0x0
#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ #define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ #define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */
......
This diff is collapsed.
...@@ -510,10 +510,10 @@ int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset, ...@@ -510,10 +510,10 @@ int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
{ {
int ret; int ret;
ret = down_interruptible(&dd->ipath_eep_sem); ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
if (!ret) { if (!ret) {
ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len); ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len);
up(&dd->ipath_eep_sem); mutex_unlock(&dd->ipath_eep_lock);
} }
return ret; return ret;
...@@ -524,10 +524,10 @@ int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset, ...@@ -524,10 +524,10 @@ int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
{ {
int ret; int ret;
ret = down_interruptible(&dd->ipath_eep_sem); ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
if (!ret) { if (!ret) {
ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len); ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len);
up(&dd->ipath_eep_sem); mutex_unlock(&dd->ipath_eep_lock);
} }
return ret; return ret;
...@@ -574,7 +574,7 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd) ...@@ -574,7 +574,7 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
struct ipath_devdata *dd0 = ipath_lookup(0); struct ipath_devdata *dd0 = ipath_lookup(0);
if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) { if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) {
u8 *bguid, oguid; u8 oguid;
dd->ipath_guid = dd0->ipath_guid; dd->ipath_guid = dd0->ipath_guid;
bguid = (u8 *) & dd->ipath_guid; bguid = (u8 *) & dd->ipath_guid;
...@@ -616,9 +616,9 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd) ...@@ -616,9 +616,9 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
goto bail; goto bail;
} }
down(&dd->ipath_eep_sem); mutex_lock(&dd->ipath_eep_lock);
eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len); eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len);
up(&dd->ipath_eep_sem); mutex_unlock(&dd->ipath_eep_lock);
if (eep_stat) { if (eep_stat) {
ipath_dev_err(dd, "Failed reading GUID from eeprom\n"); ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
...@@ -674,7 +674,6 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd) ...@@ -674,7 +674,6 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
* elsewhere for backward-compatibility. * elsewhere for backward-compatibility.
*/ */
char *snp = dd->ipath_serial; char *snp = dd->ipath_serial;
int len;
memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix); memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
snp[sizeof ifp->if_sprefix] = '\0'; snp[sizeof ifp->if_sprefix] = '\0';
len = strlen(snp); len = strlen(snp);
...@@ -764,14 +763,14 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd) ...@@ -764,14 +763,14 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
/* Grab semaphore and read current EEPROM. If we get an /* Grab semaphore and read current EEPROM. If we get an
* error, let go, but if not, keep it until we finish write. * error, let go, but if not, keep it until we finish write.
*/ */
ret = down_interruptible(&dd->ipath_eep_sem); ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
if (ret) { if (ret) {
ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n"); ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n");
goto free_bail; goto free_bail;
} }
ret = ipath_eeprom_internal_read(dd, 0, buf, len); ret = ipath_eeprom_internal_read(dd, 0, buf, len);
if (ret) { if (ret) {
up(&dd->ipath_eep_sem); mutex_unlock(&dd->ipath_eep_lock);
ipath_dev_err(dd, "Unable read EEPROM for logging\n"); ipath_dev_err(dd, "Unable read EEPROM for logging\n");
goto free_bail; goto free_bail;
} }
...@@ -779,7 +778,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd) ...@@ -779,7 +778,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
csum = flash_csum(ifp, 0); csum = flash_csum(ifp, 0);
if (csum != ifp->if_csum) { if (csum != ifp->if_csum) {
up(&dd->ipath_eep_sem); mutex_unlock(&dd->ipath_eep_lock);
ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n", ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
csum, ifp->if_csum); csum, ifp->if_csum);
ret = 1; ret = 1;
...@@ -849,7 +848,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd) ...@@ -849,7 +848,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
csum = flash_csum(ifp, 1); csum = flash_csum(ifp, 1);
ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1); ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1);
} }
up(&dd->ipath_eep_sem); mutex_unlock(&dd->ipath_eep_lock);
if (ret) if (ret)
ipath_dev_err(dd, "Failed updating EEPROM\n"); ipath_dev_err(dd, "Failed updating EEPROM\n");
......
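Editorial sketch of the ipath_eeprom.c conversion above: the semaphore becomes a mutex but the locking shape stays the same, take ipath_eep_lock interruptibly, run the EEPROM transaction, and drop the lock on every exit path. The wrapper below is a hypothetical helper (not in the driver) built from the internal read/write calls shown in the diff:

static int example_eeprom_update(struct ipath_devdata *dd, void *buf, int len)
{
	int ret;

	ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
	if (ret)
		return ret;	/* interrupted while waiting for the lock */

	ret = ipath_eeprom_internal_read(dd, 0, buf, len);
	if (!ret)
		ret = ipath_eeprom_internal_write(dd, 0, buf, len);

	mutex_unlock(&dd->ipath_eep_lock);
	return ret;
}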
...@@ -169,7 +169,7 @@ static int ipath_get_base_info(struct file *fp, ...@@ -169,7 +169,7 @@ static int ipath_get_base_info(struct file *fp,
kinfo->spi_piocnt = dd->ipath_pbufsport; kinfo->spi_piocnt = dd->ipath_pbufsport;
kinfo->spi_piobufbase = (u64) pd->port_piobufs; kinfo->spi_piobufbase = (u64) pd->port_piobufs;
kinfo->__spi_uregbase = (u64) dd->ipath_uregbase + kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
dd->ipath_palign * pd->port_port; dd->ipath_ureg_align * pd->port_port;
} else if (master) { } else if (master) {
kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) + kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) +
(dd->ipath_pbufsport % subport_cnt); (dd->ipath_pbufsport % subport_cnt);
...@@ -186,7 +186,7 @@ static int ipath_get_base_info(struct file *fp, ...@@ -186,7 +186,7 @@ static int ipath_get_base_info(struct file *fp,
} }
if (shared) { if (shared) {
kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase + kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
dd->ipath_palign * pd->port_port; dd->ipath_ureg_align * pd->port_port;
kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs; kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base; kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr; kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
...@@ -742,11 +742,12 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport, ...@@ -742,11 +742,12 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
* updated and correct itself, even in the face of software * updated and correct itself, even in the face of software
* bugs. * bugs.
*/ */
*(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0; if (pd->port_rcvhdrtail_kvaddr)
set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port, ipath_clear_rcvhdrtail(pd);
set_bit(dd->ipath_r_portenable_shift + pd->port_port,
&dd->ipath_rcvctrl); &dd->ipath_rcvctrl);
} else } else
clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port, clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
&dd->ipath_rcvctrl); &dd->ipath_rcvctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl); dd->ipath_rcvctrl);
...@@ -881,7 +882,7 @@ static int ipath_create_user_egr(struct ipath_portdata *pd) ...@@ -881,7 +882,7 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
egrcnt = dd->ipath_rcvegrcnt; egrcnt = dd->ipath_rcvegrcnt;
/* TID number offset for this port */ /* TID number offset for this port */
egroff = pd->port_port * egrcnt; egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
egrsize = dd->ipath_rcvegrbufsize; egrsize = dd->ipath_rcvegrbufsize;
ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid " ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
"offset %x, egrsize %u\n", egrcnt, egroff, egrsize); "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);
...@@ -1049,11 +1050,6 @@ static int mmap_piobufs(struct vm_area_struct *vma, ...@@ -1049,11 +1050,6 @@ static int mmap_piobufs(struct vm_area_struct *vma,
phys = dd->ipath_physaddr + piobufs; phys = dd->ipath_physaddr + piobufs;
/*
* Don't mark this as non-cached, or we don't get the
* write combining behavior we want on the PIO buffers!
*/
#if defined(__powerpc__) #if defined(__powerpc__)
/* There isn't a generic way to specify writethrough mappings */ /* There isn't a generic way to specify writethrough mappings */
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
...@@ -1120,33 +1116,24 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma, ...@@ -1120,33 +1116,24 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
} }
/* /*
* ipath_file_vma_nopage - handle a VMA page fault. * ipath_file_vma_fault - handle a VMA page fault.
*/ */
static struct page *ipath_file_vma_nopage(struct vm_area_struct *vma, static int ipath_file_vma_fault(struct vm_area_struct *vma,
unsigned long address, int *type) struct vm_fault *vmf)
{ {
unsigned long offset = address - vma->vm_start; struct page *page;
struct page *page = NOPAGE_SIGBUS;
void *pageptr;
/* page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
* Convert the vmalloc address into a struct page.
*/
pageptr = (void *)(offset + (vma->vm_pgoff << PAGE_SHIFT));
page = vmalloc_to_page(pageptr);
if (!page) if (!page)
goto out; return VM_FAULT_SIGBUS;
/* Increment the reference count. */
get_page(page); get_page(page);
if (type) vmf->page = page;
*type = VM_FAULT_MINOR;
out: return 0;
return page;
} }
static struct vm_operations_struct ipath_file_vm_ops = { static struct vm_operations_struct ipath_file_vm_ops = {
.nopage = ipath_file_vma_nopage, .fault = ipath_file_vma_fault,
}; };
static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
...@@ -1284,7 +1271,7 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) ...@@ -1284,7 +1271,7 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
goto bail; goto bail;
} }
ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port; ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
if (!pd->port_subport_cnt) { if (!pd->port_subport_cnt) {
/* port is not shared */ /* port is not shared */
piocnt = dd->ipath_pbufsport; piocnt = dd->ipath_pbufsport;
...@@ -1400,7 +1387,10 @@ static unsigned int ipath_poll_next(struct ipath_portdata *pd, ...@@ -1400,7 +1387,10 @@ static unsigned int ipath_poll_next(struct ipath_portdata *pd,
pollflag = ipath_poll_hdrqfull(pd); pollflag = ipath_poll_hdrqfull(pd);
head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port); head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
tail = *(volatile u64 *)pd->port_rcvhdrtail_kvaddr; if (pd->port_rcvhdrtail_kvaddr)
tail = ipath_get_rcvhdrtail(pd);
else
tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
if (head != tail) if (head != tail)
pollflag |= POLLIN | POLLRDNORM; pollflag |= POLLIN | POLLRDNORM;
...@@ -1410,7 +1400,7 @@ static unsigned int ipath_poll_next(struct ipath_portdata *pd, ...@@ -1410,7 +1400,7 @@ static unsigned int ipath_poll_next(struct ipath_portdata *pd,
/* flush waiting flag so we don't miss an event */ /* flush waiting flag so we don't miss an event */
wmb(); wmb();
set_bit(pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT, set_bit(pd->port_port + dd->ipath_r_intravail_shift,
&dd->ipath_rcvctrl); &dd->ipath_rcvctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
...@@ -1790,6 +1780,7 @@ static int find_shared_port(struct file *fp, ...@@ -1790,6 +1780,7 @@ static int find_shared_port(struct file *fp,
} }
port_fp(fp) = pd; port_fp(fp) = pd;
subport_fp(fp) = pd->port_cnt++; subport_fp(fp) = pd->port_cnt++;
pd->port_subpid[subport_fp(fp)] = current->pid;
tidcursor_fp(fp) = 0; tidcursor_fp(fp) = 0;
pd->active_slaves |= 1 << subport_fp(fp); pd->active_slaves |= 1 << subport_fp(fp);
ipath_cdbg(PROC, ipath_cdbg(PROC,
...@@ -1920,8 +1911,7 @@ static int ipath_do_user_init(struct file *fp, ...@@ -1920,8 +1911,7 @@ static int ipath_do_user_init(struct file *fp,
*/ */
head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port); head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port); ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
dd->ipath_lastegrheads[pd->port_port] = -1; pd->port_lastrcvhdrqtail = -1;
dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n", ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
pd->port_port, head32); pd->port_port, head32);
pd->port_tidcursor = 0; /* start at beginning after open */ pd->port_tidcursor = 0; /* start at beginning after open */
...@@ -1941,11 +1931,13 @@ static int ipath_do_user_init(struct file *fp, ...@@ -1941,11 +1931,13 @@ static int ipath_do_user_init(struct file *fp,
* We explicitly set the in-memory copy to 0 beforehand, so we don't * have to wait to be sure the DMA update has happened.
* have to wait to be sure the DMA update has happened. * have to wait to be sure the DMA update has happened.
*/ */
*(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0ULL; if (pd->port_rcvhdrtail_kvaddr)
set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port, ipath_clear_rcvhdrtail(pd);
set_bit(dd->ipath_r_portenable_shift + pd->port_port,
&dd->ipath_rcvctrl); &dd->ipath_rcvctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD); dd->ipath_rcvctrl &
~(1ULL << dd->ipath_r_tailupd_shift));
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl); dd->ipath_rcvctrl);
/* Notify any waiting slaves */ /* Notify any waiting slaves */
...@@ -2022,6 +2014,7 @@ static int ipath_close(struct inode *in, struct file *fp) ...@@ -2022,6 +2014,7 @@ static int ipath_close(struct inode *in, struct file *fp)
* the slave(s) don't wait for receive data forever. * the slave(s) don't wait for receive data forever.
*/ */
pd->active_slaves &= ~(1 << fd->subport); pd->active_slaves &= ~(1 << fd->subport);
pd->port_subpid[fd->subport] = 0;
mutex_unlock(&ipath_mutex); mutex_unlock(&ipath_mutex);
goto bail; goto bail;
} }
...@@ -2054,9 +2047,9 @@ static int ipath_close(struct inode *in, struct file *fp) ...@@ -2054,9 +2047,9 @@ static int ipath_close(struct inode *in, struct file *fp)
if (dd->ipath_kregbase) { if (dd->ipath_kregbase) {
int i; int i;
/* atomically clear receive enable port and intr avail. */ /* atomically clear receive enable port and intr avail. */
clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port, clear_bit(dd->ipath_r_portenable_shift + port,
&dd->ipath_rcvctrl); &dd->ipath_rcvctrl);
clear_bit(pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT, clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
&dd->ipath_rcvctrl); &dd->ipath_rcvctrl);
ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl, ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl); dd->ipath_rcvctrl);
...@@ -2149,11 +2142,15 @@ static int ipath_get_slave_info(struct ipath_portdata *pd, ...@@ -2149,11 +2142,15 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
static int ipath_force_pio_avail_update(struct ipath_devdata *dd) static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
{ {
u64 reg = dd->ipath_sendctrl; unsigned long flags;
clear_bit(IPATH_S_PIOBUFAVAILUPD, &reg); spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, reg); ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
return 0; return 0;
} }
...@@ -2227,6 +2224,11 @@ static ssize_t ipath_write(struct file *fp, const char __user *data, ...@@ -2227,6 +2224,11 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
dest = &cmd.cmd.poll_type; dest = &cmd.cmd.poll_type;
src = &ucmd->cmd.poll_type; src = &ucmd->cmd.poll_type;
break; break;
case IPATH_CMD_ARMLAUNCH_CTRL:
copy = sizeof(cmd.cmd.armlaunch_ctrl);
dest = &cmd.cmd.armlaunch_ctrl;
src = &ucmd->cmd.armlaunch_ctrl;
break;
default: default:
ret = -EINVAL; ret = -EINVAL;
goto bail; goto bail;
...@@ -2302,6 +2304,12 @@ static ssize_t ipath_write(struct file *fp, const char __user *data, ...@@ -2302,6 +2304,12 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
case IPATH_CMD_POLL_TYPE: case IPATH_CMD_POLL_TYPE:
pd->poll_type = cmd.cmd.poll_type; pd->poll_type = cmd.cmd.poll_type;
break; break;
case IPATH_CMD_ARMLAUNCH_CTRL:
if (cmd.cmd.armlaunch_ctrl)
ipath_enable_armlaunch(pd->port_dd);
else
ipath_disable_armlaunch(pd->port_dd);
break;
} }
if (ret >= 0) if (ret >= 0)
......
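Editorial sketch of the .nopage to .fault conversion in ipath_file_vma_fault() above: with the fault API the handler looks the vmalloc page up from vmf->pgoff, takes a reference, stores it in vmf->page and returns 0 (or VM_FAULT_SIGBUS on failure), instead of returning a page pointer and filling *type. Minimal stand-alone version with illustrative names, assuming the VMA's pgoff indexes into a vmalloc buffer:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static int example_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	/* the mmap setup arranged for pgoff to index the vmalloc buffer */
	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);		/* hold a reference for the new mapping */
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct example_vm_ops = {
	.fault = example_vma_fault,
};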
...@@ -108,21 +108,16 @@ static const struct file_operations atomic_stats_ops = { ...@@ -108,21 +108,16 @@ static const struct file_operations atomic_stats_ops = {
.read = atomic_stats_read, .read = atomic_stats_read,
}; };
#define NUM_COUNTERS sizeof(struct infinipath_counters) / sizeof(u64)
static ssize_t atomic_counters_read(struct file *file, char __user *buf, static ssize_t atomic_counters_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
u64 counters[NUM_COUNTERS]; struct infinipath_counters counters;
u16 i;
struct ipath_devdata *dd; struct ipath_devdata *dd;
dd = file->f_path.dentry->d_inode->i_private; dd = file->f_path.dentry->d_inode->i_private;
dd->ipath_f_read_counters(dd, &counters);
for (i = 0; i < NUM_COUNTERS; i++) return simple_read_from_buffer(buf, count, ppos, &counters,
counters[i] = ipath_snap_cntr(dd, i);
return simple_read_from_buffer(buf, count, ppos, counters,
sizeof counters); sizeof counters);
} }
...@@ -243,8 +238,7 @@ static int create_device_files(struct super_block *sb, ...@@ -243,8 +238,7 @@ static int create_device_files(struct super_block *sb,
snprintf(unit, sizeof unit, "%02d", dd->ipath_unit); snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
(struct file_operations *) &simple_dir_operations, &simple_dir_operations, dd);
dd);
if (ret) { if (ret) {
printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret); printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
goto bail; goto bail;
......
This diff is collapsed.
This diff is collapsed.
...@@ -91,7 +91,7 @@ static int create_port0_egr(struct ipath_devdata *dd) ...@@ -91,7 +91,7 @@ static int create_port0_egr(struct ipath_devdata *dd)
struct ipath_skbinfo *skbinfo; struct ipath_skbinfo *skbinfo;
int ret; int ret;
egrcnt = dd->ipath_rcvegrcnt; egrcnt = dd->ipath_p0_rcvegrcnt;
skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt); skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt);
if (skbinfo == NULL) { if (skbinfo == NULL) {
...@@ -244,8 +244,7 @@ static int init_chip_first(struct ipath_devdata *dd, ...@@ -244,8 +244,7 @@ static int init_chip_first(struct ipath_devdata *dd,
* cfgports. We do still check and report a difference, if * cfgports. We do still check and report a difference, if
* not same (should be impossible). * not same (should be impossible).
*/ */
dd->ipath_portcnt = dd->ipath_f_config_ports(dd, ipath_cfgports);
ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
if (!ipath_cfgports) if (!ipath_cfgports)
dd->ipath_cfgports = dd->ipath_portcnt; dd->ipath_cfgports = dd->ipath_portcnt;
else if (ipath_cfgports <= dd->ipath_portcnt) { else if (ipath_cfgports <= dd->ipath_portcnt) {
...@@ -272,22 +271,7 @@ static int init_chip_first(struct ipath_devdata *dd, ...@@ -272,22 +271,7 @@ static int init_chip_first(struct ipath_devdata *dd,
goto done; goto done;
} }
dd->ipath_lastegrheads = kzalloc(sizeof(*dd->ipath_lastegrheads)
* dd->ipath_cfgports,
GFP_KERNEL);
dd->ipath_lastrcvhdrqtails =
kzalloc(sizeof(*dd->ipath_lastrcvhdrqtails)
* dd->ipath_cfgports, GFP_KERNEL);
if (!dd->ipath_lastegrheads || !dd->ipath_lastrcvhdrqtails) {
ipath_dev_err(dd, "Unable to allocate head arrays, "
"failing\n");
ret = -ENOMEM;
goto done;
}
pd = create_portdata0(dd); pd = create_portdata0(dd);
if (!pd) { if (!pd) {
ipath_dev_err(dd, "Unable to allocate portdata for port " ipath_dev_err(dd, "Unable to allocate portdata for port "
"0, failing\n"); "0, failing\n");
...@@ -345,10 +329,10 @@ static int init_chip_first(struct ipath_devdata *dd, ...@@ -345,10 +329,10 @@ static int init_chip_first(struct ipath_devdata *dd,
dd->ipath_piobcnt2k, dd->ipath_pio2kbase); dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
spin_lock_init(&dd->ipath_tid_lock); spin_lock_init(&dd->ipath_tid_lock);
spin_lock_init(&dd->ipath_sendctrl_lock);
spin_lock_init(&dd->ipath_gpio_lock); spin_lock_init(&dd->ipath_gpio_lock);
spin_lock_init(&dd->ipath_eep_st_lock); spin_lock_init(&dd->ipath_eep_st_lock);
sema_init(&dd->ipath_eep_sem, 1); mutex_init(&dd->ipath_eep_lock);
done: done:
*pdp = pd; *pdp = pd;
...@@ -372,9 +356,9 @@ static int init_chip_reset(struct ipath_devdata *dd, ...@@ -372,9 +356,9 @@ static int init_chip_reset(struct ipath_devdata *dd,
*pdp = dd->ipath_pd[0]; *pdp = dd->ipath_pd[0];
/* ensure chip does no sends or receives while we re-initialize */ /* ensure chip does no sends or receives while we re-initialize */
dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U; dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
if (dd->ipath_portcnt != rtmp) if (dd->ipath_portcnt != rtmp)
...@@ -487,6 +471,7 @@ static void enable_chip(struct ipath_devdata *dd, ...@@ -487,6 +471,7 @@ static void enable_chip(struct ipath_devdata *dd,
struct ipath_portdata *pd, int reinit) struct ipath_portdata *pd, int reinit)
{ {
u32 val; u32 val;
unsigned long flags;
int i; int i;
if (!reinit) if (!reinit)
...@@ -495,19 +480,21 @@ static void enable_chip(struct ipath_devdata *dd, ...@@ -495,19 +480,21 @@ static void enable_chip(struct ipath_devdata *dd,
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl); dd->ipath_rcvctrl);
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
/* Enable PIO send, and update of PIOavail regs to memory. */ /* Enable PIO send, and update of PIOavail regs to memory. */
dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE | dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
INFINIPATH_S_PIOBUFAVAILUPD; INFINIPATH_S_PIOBUFAVAILUPD;
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
dd->ipath_sendctrl); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
/* /*
* enable port 0 receive, and receive interrupt. other ports * enable port 0 receive, and receive interrupt. other ports
* done as user opens and inits them. * done as user opens and inits them.
*/ */
dd->ipath_rcvctrl = INFINIPATH_R_TAILUPD | dd->ipath_rcvctrl = (1ULL << dd->ipath_r_tailupd_shift) |
(1ULL << INFINIPATH_R_PORTENABLE_SHIFT) | (1ULL << dd->ipath_r_portenable_shift) |
(1ULL << INFINIPATH_R_INTRAVAIL_SHIFT); (1ULL << dd->ipath_r_intravail_shift);
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl); dd->ipath_rcvctrl);
...@@ -523,12 +510,11 @@ static void enable_chip(struct ipath_devdata *dd, ...@@ -523,12 +510,11 @@ static void enable_chip(struct ipath_devdata *dd,
*/ */
val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0); val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
(void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0); (void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
dd->ipath_port0head = ipath_read_ureg32(dd, ur_rcvhdrtail, 0);
/* Initialize so we interrupt on next packet received */ /* Initialize so we interrupt on next packet received */
(void)ipath_write_ureg(dd, ur_rcvhdrhead, (void)ipath_write_ureg(dd, ur_rcvhdrhead,
dd->ipath_rhdrhead_intr_off | dd->ipath_rhdrhead_intr_off |
dd->ipath_port0head, 0); dd->ipath_pd[0]->port_head, 0);
/* /*
* by now pioavail updates to memory should have occurred, so * by now pioavail updates to memory should have occurred, so
...@@ -542,12 +528,8 @@ static void enable_chip(struct ipath_devdata *dd, ...@@ -542,12 +528,8 @@ static void enable_chip(struct ipath_devdata *dd,
/* /*
* Chip Errata bug 6641; even and odd qwords>3 are swapped. * Chip Errata bug 6641; even and odd qwords>3 are swapped.
*/ */
if (i > 3) { if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
if (i & 1) val = dd->ipath_pioavailregs_dma[i ^ 1];
val = dd->ipath_pioavailregs_dma[i - 1];
else
val = dd->ipath_pioavailregs_dma[i + 1];
}
else else
val = dd->ipath_pioavailregs_dma[i]; val = dd->ipath_pioavailregs_dma[i];
dd->ipath_pioavailshadow[i] = le64_to_cpu(val); dd->ipath_pioavailshadow[i] = le64_to_cpu(val);
...@@ -690,12 +672,13 @@ static int init_housekeeping(struct ipath_devdata *dd, ...@@ -690,12 +672,13 @@ static int init_housekeeping(struct ipath_devdata *dd,
*/ */
int ipath_init_chip(struct ipath_devdata *dd, int reinit) int ipath_init_chip(struct ipath_devdata *dd, int reinit)
{ {
int ret = 0, i; int ret = 0;
u32 val32, kpiobufs; u32 val32, kpiobufs;
u32 piobufs, uports; u32 piobufs, uports;
u64 val; u64 val;
struct ipath_portdata *pd = NULL; /* keep gcc4 happy */ struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
gfp_t gfp_flags = GFP_USER | __GFP_COMP; gfp_t gfp_flags = GFP_USER | __GFP_COMP;
unsigned long flags;
ret = init_housekeeping(dd, &pd, reinit); ret = init_housekeeping(dd, &pd, reinit);
if (ret) if (ret)
...@@ -746,7 +729,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) ...@@ -746,7 +729,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
kpiobufs = ipath_kpiobufs; kpiobufs = ipath_kpiobufs;
if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) { if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
i = (int) piobufs - int i = (int) piobufs -
(int) (uports * IPATH_MIN_USER_PORT_BUFCNT); (int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
if (i < 0) if (i < 0)
i = 0; i = 0;
...@@ -827,8 +810,12 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) ...@@ -827,8 +810,12 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
~0ULL&~INFINIPATH_HWE_MEMBISTFAILED); ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
INFINIPATH_S_PIOENABLE); spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE;
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
/* /*
* before error clears, since we expect serdes pll errors during * before error clears, since we expect serdes pll errors during
......
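Editorial note on the Chip Errata 6641 hunk in enable_chip() above (and the matching one in ipath_clear_freeze() below): for indices above 3 the even and odd qwords are swapped, and XOR-ing the low bit of the index (i ^ 1) maps 4 to 5, 6 to 7, and so on, which is exactly what the removed if/else did. A minimal sketch with hypothetical names:

#include <linux/types.h>

/* pick the DMA'd pioavail qword, honouring the even/odd swap errata */
static __le64 pick_pioavail_qword(const __le64 *dma, unsigned int i, bool swap)
{
	unsigned int im = (swap && i > 3) ? (i ^ 1) : i;

	return dma[im];
}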
...@@ -683,7 +683,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) ...@@ -683,7 +683,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
for (i = 0; i < dd->ipath_cfgports; i++) { for (i = 0; i < dd->ipath_cfgports; i++) {
struct ipath_portdata *pd = dd->ipath_pd[i]; struct ipath_portdata *pd = dd->ipath_pd[i];
if (i == 0) { if (i == 0) {
hd = dd->ipath_port0head; hd = pd->port_head;
tl = (u32) le64_to_cpu( tl = (u32) le64_to_cpu(
*dd->ipath_hdrqtailptr); *dd->ipath_hdrqtailptr);
} else if (pd && pd->port_cnt && } else if (pd && pd->port_cnt &&
...@@ -693,7 +693,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) ...@@ -693,7 +693,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
* except kernel * except kernel
*/ */
tl = *(u64 *) pd->port_rcvhdrtail_kvaddr; tl = *(u64 *) pd->port_rcvhdrtail_kvaddr;
if (tl == dd->ipath_lastrcvhdrqtails[i]) if (tl == pd->port_lastrcvhdrqtail)
continue; continue;
hd = ipath_read_ureg32(dd, ur_rcvhdrhead, hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
i); i);
...@@ -703,7 +703,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) ...@@ -703,7 +703,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
(!hd && tl == dd->ipath_hdrqlast)) { (!hd && tl == dd->ipath_hdrqlast)) {
if (i == 0) if (i == 0)
chkerrpkts = 1; chkerrpkts = 1;
dd->ipath_lastrcvhdrqtails[i] = tl; pd->port_lastrcvhdrqtail = tl;
pd->port_hdrqfull++; pd->port_hdrqfull++;
/* flush hdrqfull so that poll() sees it */ /* flush hdrqfull so that poll() sees it */
wmb(); wmb();
...@@ -712,6 +712,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) ...@@ -712,6 +712,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
} }
} }
if (errs & INFINIPATH_E_RRCVEGRFULL) { if (errs & INFINIPATH_E_RRCVEGRFULL) {
struct ipath_portdata *pd = dd->ipath_pd[0];
/* /*
* since this is of less importance and not likely to * since this is of less importance and not likely to
* happen without also getting hdrfull, only count * happen without also getting hdrfull, only count
...@@ -719,7 +721,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) ...@@ -719,7 +721,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
* vs user) * vs user)
*/ */
ipath_stats.sps_etidfull++; ipath_stats.sps_etidfull++;
if (dd->ipath_port0head != if (pd->port_head !=
(u32) le64_to_cpu(*dd->ipath_hdrqtailptr)) (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
chkerrpkts = 1; chkerrpkts = 1;
} }
...@@ -795,6 +797,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd) ...@@ -795,6 +797,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
{ {
int i, im; int i, im;
__le64 val; __le64 val;
unsigned long flags;
/* disable error interrupts, to avoid confusion */ /* disable error interrupts, to avoid confusion */
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL); ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
...@@ -813,11 +816,14 @@ void ipath_clear_freeze(struct ipath_devdata *dd) ...@@ -813,11 +816,14 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
dd->ipath_control); dd->ipath_control);
/* ensure pio avail updates continue */ /* ensure pio avail updates continue */
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD); dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
dd->ipath_sendctrl); dd->ipath_sendctrl);
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
/* /*
* We just enabled pioavailupdate, so dma copy is almost certainly * We just enabled pioavailupdate, so dma copy is almost certainly
...@@ -825,8 +831,8 @@ void ipath_clear_freeze(struct ipath_devdata *dd) ...@@ -825,8 +831,8 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
*/ */
for (i = 0; i < dd->ipath_pioavregs; i++) { for (i = 0; i < dd->ipath_pioavregs; i++) {
/* deal with 6110 chip bug */ /* deal with 6110 chip bug */
im = i > 3 ? ((i&1) ? i-1 : i+1) : i; im = i > 3 ? i ^ 1 : i;
val = ipath_read_kreg64(dd, (0x1000/sizeof(u64))+im); val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i] dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i]
= le64_to_cpu(val); = le64_to_cpu(val);
} }
...@@ -849,7 +855,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd) ...@@ -849,7 +855,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
/* this is separate to allow for better optimization of ipath_intr() */ /* this is separate to allow for better optimization of ipath_intr() */
static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp) static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
{ {
/* /*
* sometimes happen during driver init and unload, don't want * sometimes happen during driver init and unload, don't want
...@@ -877,7 +883,7 @@ static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp) ...@@ -877,7 +883,7 @@ static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
dd->ipath_f_free_irq(dd); dd->ipath_f_free_irq(dd);
} }
} }
if (ipath_read_kreg32(dd, dd->ipath_kregs->kr_intmask)) { if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
ipath_dev_err(dd, "%u unexpected interrupts, " ipath_dev_err(dd, "%u unexpected interrupts, "
"disabling interrupts completely\n", "disabling interrupts completely\n",
*unexpectp); *unexpectp);
...@@ -892,7 +898,7 @@ static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp) ...@@ -892,7 +898,7 @@ static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
"ignoring\n"); "ignoring\n");
} }
static void ipath_bad_regread(struct ipath_devdata *dd) static noinline void ipath_bad_regread(struct ipath_devdata *dd)
{ {
static int allbits; static int allbits;
...@@ -920,31 +926,9 @@ static void ipath_bad_regread(struct ipath_devdata *dd) ...@@ -920,31 +926,9 @@ static void ipath_bad_regread(struct ipath_devdata *dd)
} }
} }
static void handle_port_pioavail(struct ipath_devdata *dd)
{
u32 i;
/*
* start from port 1, since for now port 0 is never using
* wait_event for PIO
*/
for (i = 1; dd->ipath_portpiowait && i < dd->ipath_cfgports; i++) {
struct ipath_portdata *pd = dd->ipath_pd[i];
if (pd && pd->port_cnt &&
dd->ipath_portpiowait & (1U << i)) {
clear_bit(i, &dd->ipath_portpiowait);
if (test_bit(IPATH_PORT_WAITING_PIO,
&pd->port_flag)) {
clear_bit(IPATH_PORT_WAITING_PIO,
&pd->port_flag);
wake_up_interruptible(&pd->port_wait);
}
}
}
}
static void handle_layer_pioavail(struct ipath_devdata *dd) static void handle_layer_pioavail(struct ipath_devdata *dd)
{ {
unsigned long flags;
int ret; int ret;
ret = ipath_ib_piobufavail(dd->verbs_dev); ret = ipath_ib_piobufavail(dd->verbs_dev);
...@@ -953,9 +937,12 @@ static void handle_layer_pioavail(struct ipath_devdata *dd) ...@@ -953,9 +937,12 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
return; return;
set: set:
set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
dd->ipath_sendctrl); dd->ipath_sendctrl);
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
} }
/* /*
...@@ -969,7 +956,15 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat) ...@@ -969,7 +956,15 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
int i; int i;
int rcvdint = 0; int rcvdint = 0;
/* test_bit below needs this... */ /*
* test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
* test_and_clear_bit(IPATH_PORT_WAITING_URG) below
* would both like timely updates of the bits so that
* we don't pass them by unnecessarily. the rmb()
* here ensures that we see them promptly -- the
* corresponding wmb()'s are in ipath_poll_urgent()
* and ipath_poll_next()...
*/
rmb(); rmb();
portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) & portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
dd->ipath_i_rcvavail_mask) dd->ipath_i_rcvavail_mask)
...@@ -980,7 +975,7 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat) ...@@ -980,7 +975,7 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
if (portr & (1 << i) && pd && pd->port_cnt) { if (portr & (1 << i) && pd && pd->port_cnt) {
if (test_and_clear_bit(IPATH_PORT_WAITING_RCV, if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
&pd->port_flag)) { &pd->port_flag)) {
clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT, clear_bit(i + dd->ipath_r_intravail_shift,
&dd->ipath_rcvctrl); &dd->ipath_rcvctrl);
wake_up_interruptible(&pd->port_wait); wake_up_interruptible(&pd->port_wait);
rcvdint = 1; rcvdint = 1;
...@@ -1039,7 +1034,7 @@ irqreturn_t ipath_intr(int irq, void *data) ...@@ -1039,7 +1034,7 @@ irqreturn_t ipath_intr(int irq, void *data)
goto bail; goto bail;
} }
istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus); istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
if (unlikely(!istat)) { if (unlikely(!istat)) {
ipath_stats.sps_nullintr++; ipath_stats.sps_nullintr++;
...@@ -1180,7 +1175,7 @@ irqreturn_t ipath_intr(int irq, void *data) ...@@ -1180,7 +1175,7 @@ irqreturn_t ipath_intr(int irq, void *data)
* for receive are at the bottom. * for receive are at the bottom.
*/ */
if (chk0rcv) { if (chk0rcv) {
ipath_kreceive(dd); ipath_kreceive(dd->ipath_pd[0]);
istat &= ~port0rbits; istat &= ~port0rbits;
} }
...@@ -1191,12 +1186,14 @@ irqreturn_t ipath_intr(int irq, void *data) ...@@ -1191,12 +1186,14 @@ irqreturn_t ipath_intr(int irq, void *data)
handle_urcv(dd, istat); handle_urcv(dd, istat);
if (istat & INFINIPATH_I_SPIOBUFAVAIL) { if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); unsigned long flags;
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
dd->ipath_sendctrl); dd->ipath_sendctrl);
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
if (dd->ipath_portpiowait) spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
handle_port_pioavail(dd);
handle_layer_pioavail(dd); handle_layer_pioavail(dd);
} }
......
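Editorial sketch of the flag/barrier pairing that the new handle_urcv() comment above spells out: the poller publishes its waiting flag with a wmb() before re-arming the interrupt source, and the interrupt handler issues an rmb() before testing the flag, so a freshly set flag is not missed. The names below are illustrative stand-ins, not the driver's own:

#include <linux/bitops.h>
#include <linux/wait.h>
#include <asm/system.h>

#define EXAMPLE_PORT_WAITING_RCV 0

struct example_port {
	unsigned long flags;
	wait_queue_head_t wait;
};

/* poller side (cf. ipath_poll_next / ipath_poll_urgent) */
static void example_poller_arm(struct example_port *pd)
{
	set_bit(EXAMPLE_PORT_WAITING_RCV, &pd->flags);
	wmb();	/* make the flag visible before the interrupt is re-armed */
	/* ...re-enable the per-port receive-available interrupt here... */
}

/* interrupt side (cf. handle_urcv) */
static void example_irq_wake(struct example_port *pd)
{
	rmb();	/* pick up a flag the poller may have just set */
	if (test_and_clear_bit(EXAMPLE_PORT_WAITING_RCV, &pd->flags))
		wake_up_interruptible(&pd->wait);
}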
This diff is collapsed.
...@@ -128,9 +128,8 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge, ...@@ -128,9 +128,8 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
int ret; int ret;
/* /*
* We use LKEY == zero to mean a physical kmalloc() address. * We use LKEY == zero for kernel virtual addresses
* This is a bit of a hack since we rely on dma_map_single() * (see ipath_get_dma_mr and ipath_dma.c).
* being reversible by calling bus_to_virt().
*/ */
if (sge->lkey == 0) { if (sge->lkey == 0) {
struct ipath_pd *pd = to_ipd(qp->ibqp.pd); struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
......
This diff is collapsed.
...@@ -387,8 +387,8 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) ...@@ -387,8 +387,8 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
struct ib_wc wc; struct ib_wc wc;
int ret = 0; int ret = 0;
ipath_dbg("QP%d/%d in error state\n", ipath_dbg("QP%d/%d in error state (%d)\n",
qp->ibqp.qp_num, qp->remote_qpn); qp->ibqp.qp_num, qp->remote_qpn, err);
spin_lock(&dev->pending_lock); spin_lock(&dev->pending_lock);
/* XXX What if its already removed by the timeout code? */ /* XXX What if its already removed by the timeout code? */
...@@ -855,8 +855,6 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, ...@@ -855,8 +855,6 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
* See ipath_mmap() for details. * See ipath_mmap() for details.
*/ */
if (udata && udata->outlen >= sizeof(__u64)) { if (udata && udata->outlen >= sizeof(__u64)) {
int err;
if (!qp->r_rq.wq) { if (!qp->r_rq.wq) {
__u64 offset = 0; __u64 offset = 0;
......
...@@ -647,6 +647,7 @@ static void send_rc_ack(struct ipath_qp *qp) ...@@ -647,6 +647,7 @@ static void send_rc_ack(struct ipath_qp *qp)
queue_ack: queue_ack:
spin_lock_irqsave(&qp->s_lock, flags); spin_lock_irqsave(&qp->s_lock, flags);
dev->n_rc_qacks++;
qp->s_flags |= IPATH_S_ACK_PENDING; qp->s_flags |= IPATH_S_ACK_PENDING;
qp->s_nak_state = qp->r_nak_state; qp->s_nak_state = qp->r_nak_state;
qp->s_ack_psn = qp->r_ack_psn; qp->s_ack_psn = qp->r_ack_psn;
...@@ -798,11 +799,13 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc) ...@@ -798,11 +799,13 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
static inline void update_last_psn(struct ipath_qp *qp, u32 psn) static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
{ {
if (qp->s_wait_credit) { if (qp->s_last_psn != psn) {
qp->s_wait_credit = 0; qp->s_last_psn = psn;
tasklet_hi_schedule(&qp->s_task); if (qp->s_wait_credit) {
qp->s_wait_credit = 0;
tasklet_hi_schedule(&qp->s_task);
}
} }
qp->s_last_psn = psn;
} }
/** /**
...@@ -1653,13 +1656,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, ...@@ -1653,13 +1656,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
case OP(SEND_FIRST): case OP(SEND_FIRST):
if (!ipath_get_rwqe(qp, 0)) { if (!ipath_get_rwqe(qp, 0)) {
rnr_nak: rnr_nak:
/*
* A RNR NAK will ACK earlier sends and RDMA writes.
* Don't queue the NAK if a RDMA read or atomic
* is pending though.
*/
if (qp->r_nak_state)
goto done;
qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
qp->r_ack_psn = qp->r_psn; qp->r_ack_psn = qp->r_psn;
goto send_ack; goto send_ack;
......
This diff is collapsed.
...@@ -94,8 +94,8 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, ...@@ -94,8 +94,8 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
/** /**
* ipath_create_srq - create a shared receive queue * ipath_create_srq - create a shared receive queue
* @ibpd: the protection domain of the SRQ to create * @ibpd: the protection domain of the SRQ to create
* @attr: the attributes of the SRQ * @srq_init_attr: the attributes of the SRQ
* @udata: not used by the InfiniPath verbs driver * @udata: data from libipathverbs when creating a user SRQ
*/ */
struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
struct ib_srq_init_attr *srq_init_attr, struct ib_srq_init_attr *srq_init_attr,
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -8,5 +8,5 @@ config INFINIBAND_ISER ...@@ -8,5 +8,5 @@ config INFINIBAND_ISER
that speak iSCSI over iSER over InfiniBand. that speak iSCSI over iSER over InfiniBand.
The iSER protocol is defined by IETF. The iSER protocol is defined by IETF.
See <http://www.ietf.org/internet-drafts/draft-ietf-ips-iser-05.txt> See <http://www.ietf.org/rfc/rfc5046.txt>
and <http://www.infinibandta.org/members/spec/iser_annex_060418.pdf> and <http://www.infinibandta.org/members/spec/Annex_iSER.PDF>
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.