Commit 1471cb6c authored by Ira Weiny, committed by Roland Dreier

IB/mad: Add user space RMPP support

Using the new registration mechanism, define a flag that indicates the
user wishes to process RMPP messages in user space rather than have
the kernel process them.
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 0f29b46d
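
For context, a user space agent opts into this behavior through the extended registration ioctl added by the parent commit (0f29b46d), setting the new IB_USER_MAD_USER_RMPP flag in struct ib_user_mad_reg_req2. The sketch below is illustrative only and is not part of this commit; the function name, device path, and class/version values are hypothetical, and error handling is abbreviated.

/*
 * Illustrative sketch (not part of this commit): register a umad agent that
 * runs the RMPP protocol itself.  The function name and parameter values are
 * hypothetical; the ioctl and struct come from the parent commit (0f29b46d).
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rdma/ib_user_mad.h>

int register_user_rmpp_agent(const char *umad_path, unsigned char mgmt_class)
{
	struct ib_user_mad_reg_req2 req;
	int fd = open(umad_path, O_RDWR);	/* e.g. "/dev/infiniband/umad0" */

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.qpn                = 1;			/* GSI QP */
	req.mgmt_class         = mgmt_class;
	req.mgmt_class_version = 1;			/* example value */
	req.rmpp_version       = 1;			/* agent speaks RMPP ...  */
	req.flags              = IB_USER_MAD_USER_RMPP; /* ... but in user space */

	if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT2, &req)) {
		/* Kernels without user space RMPP support reject the flag. */
		perror("IB_USER_MAD_REGISTER_AGENT2");
		close(fd);
		return -1;
	}

	printf("agent id %u registered on %s\n", req.id, umad_path);
	return fd;	/* caller sends/receives individual RMPP segments on fd */
}

With the flag set, the kernel MAD layer no longer generates or reassembles RMPP segments for this agent (see the ib_mad_kernel_rmpp_agent() checks in the diff below), so the application is responsible for RMPP ACKs, segmentation, and reassembly.
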
@@ -283,6 +283,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 				goto error1;
 			}
 		}
+
 		/* Make sure class supplied is consistent with QP type */
 		if (qp_type == IB_QPT_SMI) {
 			if ((mad_reg_req->mgmt_class !=
@@ -309,6 +310,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 		/* No registration request supplied */
 		if (!send_handler)
 			goto error1;
+		if (registration_flags & IB_MAD_USER_RMPP)
+			goto error1;
 	}
 
 	/* Validate device and port */
@@ -907,6 +910,12 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 	return 0;
 }
 
+int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent)
+{
+	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
+}
+EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
+
 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 					    u32 remote_qpn, u16 pkey_index,
 					    int rmpp_active,
@@ -923,9 +932,11 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 	pad = get_pad_size(hdr_len, data_len);
 	message_size = hdr_len + data_len + pad;
 
-	if ((!mad_agent->rmpp_version &&
-	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
-	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
-		return ERR_PTR(-EINVAL);
+	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
+		if (!rmpp_active && message_size > sizeof(struct ib_mad))
+			return ERR_PTR(-EINVAL);
+	} else
+		if (rmpp_active || message_size > sizeof(struct ib_mad))
+			return ERR_PTR(-EINVAL);
 
 	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
@@ -1180,7 +1191,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 			      &mad_agent_priv->send_list);
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
-		if (mad_agent_priv->agent.rmpp_version) {
+		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
 			ret = ib_send_rmpp_mad(mad_send_wr);
 			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
 				ret = ib_send_mad(mad_send_wr);
@@ -1730,6 +1741,7 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
 	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
 	return !mad_agent_priv->agent.rmpp_version ||
+		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
 		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
 		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
@@ -1857,7 +1869,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
 	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
-	if (mad_agent_priv->agent.rmpp_version) {
+	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
 		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
 						      mad_recv_wc);
 		if (!mad_recv_wc) {
@@ -1872,10 +1884,25 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
 		if (!mad_send_wr) {
 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
+			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
+			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
+					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
+				/* user rmpp is in effect
+				 * and this is an active RMPP MAD
+				 */
+				mad_recv_wc->wc->wr_id = 0;
+				mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+								   mad_recv_wc);
+				atomic_dec(&mad_agent_priv->refcount);
+			} else {
+				/* not user rmpp, revert to normal behavior and
+				 * drop the mad */
 			ib_free_recv_mad(mad_recv_wc);
 			deref_mad_agent(mad_agent_priv);
 			return;
 		}
+		} else {
 		ib_mark_mad_done(mad_send_wr);
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
@@ -1889,6 +1916,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 		mad_send_wc.vendor_err = 0;
 		mad_send_wc.send_buf = &mad_send_wr->send_buf;
 		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
+		}
 	} else {
 		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
 						   mad_recv_wc);
@@ -2128,7 +2156,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 	mad_agent_priv = mad_send_wr->mad_agent_priv;
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
-	if (mad_agent_priv->agent.rmpp_version) {
+	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
 		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
 		if (ret == IB_RMPP_RESULT_CONSUMED)
 			goto done;
@@ -2524,7 +2552,7 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
 	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
 
-	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
+	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
 		ret = ib_retry_rmpp(mad_send_wr);
 		switch (ret) {
 		case IB_RMPP_RESULT_UNHANDLED:
...
@@ -506,13 +506,15 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
 	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
 
-	if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
-		copy_offset = IB_MGMT_MAD_HDR;
-		rmpp_active = 0;
-	} else {
+	if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
+	    && ib_mad_kernel_rmpp_agent(agent)) {
 		copy_offset = IB_MGMT_RMPP_HDR;
 		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 			      IB_MGMT_RMPP_FLAG_ACTIVE;
+	} else {
+		copy_offset = IB_MGMT_MAD_HDR;
+		rmpp_active = 0;
 	}
 
 	data_len = count - hdr_size(file) - hdr_len;
@@ -558,6 +560,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		rmpp_mad->mad_hdr.tid = *tid;
 	}
 
+	if (!ib_mad_kernel_rmpp_agent(agent)
+	   && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
+	   && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
+		spin_lock_irq(&file->send_lock);
+		list_add_tail(&packet->list, &file->send_list);
+		spin_unlock_irq(&file->send_lock);
+	} else {
 	spin_lock_irq(&file->send_lock);
 	ret = is_duplicate(file, packet);
 	if (!ret)
@@ -567,6 +576,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		ret = -EINVAL;
 		goto err_msg;
 	}
+	}
 
 	ret = ib_post_send_mad(packet->msg, NULL);
 	if (ret)
...
@@ -40,6 +40,7 @@
 #include <linux/list.h>
 
 #include <rdma/ib_verbs.h>
+#include <uapi/rdma/ib_user_mad.h>
 
 /* Management base version */
 #define IB_MGMT_BASE_VERSION	1
@@ -359,6 +360,9 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
  * @port_num: Port number on which QP is registered
  * @rmpp_version: If set, indicates the RMPP version used by this agent.
  */
+enum {
+	IB_MAD_USER_RMPP = IB_USER_MAD_USER_RMPP,
+};
 struct ib_mad_agent {
 	struct ib_device	*device;
 	struct ib_qp		*qp;
@@ -666,4 +670,11 @@ void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num);
  */
 void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
 
+/**
+ * ib_mad_kernel_rmpp_agent - Returns if the agent is performing RMPP.
+ * @agent: the agent in question
+ * @return: true if agent is performing rmpp, false otherwise.
+ */
+int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent);
+
 #endif /* IB_MAD_H */
@@ -213,7 +213,10 @@ struct ib_user_mad_reg_req {
  *                       used.
  * @rmpp_version       - If set, indicates the RMPP version to use.
  */
-#define IB_USER_MAD_REG_FLAGS_CAP (0)
+enum {
+	IB_USER_MAD_USER_RMPP = (1 << 0),
+};
+#define IB_USER_MAD_REG_FLAGS_CAP (IB_USER_MAD_USER_RMPP)
 struct ib_user_mad_reg_req2 {
 	__u32	id;
 	__u32	qpn;
...