Commit 2527e681 authored by Sean Hefty's avatar Sean Hefty Committed by Roland Dreier

IB/mad: Validate MADs for spec compliance

Validate MADs sent by userspace clients for spec compliance with
C13-18.1.1 (prevent duplicate requests and responses sent on the
same port).  Without this, RMPP transactions get aborted because
of duplicate packets.

This patch is similar to that provided by Jack Morgenstein.
Signed-off-by: default avatarSean Hefty <sean.hefty@intel.com>
Signed-off-by: default avatarMichael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: default avatarJack Morgenstein <jackm@mellanox.co.il>
Signed-off-by: default avatarRoland Dreier <rolandd@cisco.com>
parent 16c59419
...@@ -167,6 +167,15 @@ static int is_vendor_method_in_use( ...@@ -167,6 +167,15 @@ static int is_vendor_method_in_use(
return 0; return 0;
} }
/*
 * ib_response_mad - report whether a MAD is a response.
 *
 * A MAD counts as a response when its response bit is set, when it is a
 * TrapRepress (responses despite the response bit being clear), or when it
 * is a baseboard-management MAD whose attribute modifier carries the BM
 * response flag.
 */
int ib_response_mad(struct ib_mad *mad)
{
	if (mad->mad_hdr.method & IB_MGMT_METHOD_RESP)
		return 1;

	if (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		return 1;

	/* BM class signals responses via a bit in attr_mod, not the method. */
	return (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
	       (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP);
}
EXPORT_SYMBOL(ib_response_mad);
/* /*
* ib_register_mad_agent - Register to send/receive MADs * ib_register_mad_agent - Register to send/receive MADs
*/ */
...@@ -570,13 +579,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent) ...@@ -570,13 +579,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
} }
EXPORT_SYMBOL(ib_unregister_mad_agent); EXPORT_SYMBOL(ib_unregister_mad_agent);
static inline int response_mad(struct ib_mad *mad)
{
/* Trap represses are responses although response bit is reset */
return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
}
static void dequeue_mad(struct ib_mad_list_head *mad_list) static void dequeue_mad(struct ib_mad_list_head *mad_list)
{ {
struct ib_mad_queue *mad_queue; struct ib_mad_queue *mad_queue;
...@@ -723,7 +725,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, ...@@ -723,7 +725,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
switch (ret) switch (ret)
{ {
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
if (response_mad(&mad_priv->mad.mad) && if (ib_response_mad(&mad_priv->mad.mad) &&
mad_agent_priv->agent.recv_handler) { mad_agent_priv->agent.recv_handler) {
local->mad_priv = mad_priv; local->mad_priv = mad_priv;
local->recv_mad_agent = mad_agent_priv; local->recv_mad_agent = mad_agent_priv;
...@@ -1551,7 +1553,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv, ...@@ -1551,7 +1553,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&port_priv->reg_lock, flags); spin_lock_irqsave(&port_priv->reg_lock, flags);
if (response_mad(mad)) { if (ib_response_mad(mad)) {
u32 hi_tid; u32 hi_tid;
struct ib_mad_agent_private *entry; struct ib_mad_agent_private *entry;
...@@ -1799,7 +1801,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, ...@@ -1799,7 +1801,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
} }
/* Complete corresponding request */ /* Complete corresponding request */
if (response_mad(mad_recv_wc->recv_buf.mad)) { if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
spin_lock_irqsave(&mad_agent_priv->lock, flags); spin_lock_irqsave(&mad_agent_priv->lock, flags);
mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
if (!mad_send_wr) { if (!mad_send_wr) {
......
...@@ -112,8 +112,10 @@ struct ib_umad_device { ...@@ -112,8 +112,10 @@ struct ib_umad_device {
struct ib_umad_file { struct ib_umad_file {
struct ib_umad_port *port; struct ib_umad_port *port;
struct list_head recv_list; struct list_head recv_list;
struct list_head send_list;
struct list_head port_list; struct list_head port_list;
spinlock_t recv_lock; spinlock_t recv_lock;
spinlock_t send_lock;
wait_queue_head_t recv_wait; wait_queue_head_t recv_wait;
struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS]; struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
int agents_dead; int agents_dead;
...@@ -177,12 +179,21 @@ static int queue_packet(struct ib_umad_file *file, ...@@ -177,12 +179,21 @@ static int queue_packet(struct ib_umad_file *file,
return ret; return ret;
} }
static void dequeue_send(struct ib_umad_file *file,
struct ib_umad_packet *packet)
{
spin_lock_irq(&file->send_lock);
list_del(&packet->list);
spin_unlock_irq(&file->send_lock);
}
static void send_handler(struct ib_mad_agent *agent, static void send_handler(struct ib_mad_agent *agent,
struct ib_mad_send_wc *send_wc) struct ib_mad_send_wc *send_wc)
{ {
struct ib_umad_file *file = agent->context; struct ib_umad_file *file = agent->context;
struct ib_umad_packet *packet = send_wc->send_buf->context[0]; struct ib_umad_packet *packet = send_wc->send_buf->context[0];
dequeue_send(file, packet);
ib_destroy_ah(packet->msg->ah); ib_destroy_ah(packet->msg->ah);
ib_free_send_mad(packet->msg); ib_free_send_mad(packet->msg);
...@@ -370,6 +381,51 @@ static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf) ...@@ -370,6 +381,51 @@ static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
return 0; return 0;
} }
/*
 * Compare the destinations of two user MAD headers.  Returns nonzero only
 * when both use the same addressing mode (GRH or LID-routed) and the
 * corresponding address fields match.
 */
static int same_destination(struct ib_user_mad_hdr *hdr1,
			    struct ib_user_mad_hdr *hdr2)
{
	/* Mixed addressing modes can never be the same destination. */
	if (hdr1->grh_present != hdr2->grh_present)
		return 0;

	if (hdr1->grh_present)
		return !memcmp(hdr1->gid, hdr2->gid, 16);

	return hdr1->lid == hdr2->lid;
}
/*
 * Check whether a MAD the user is about to send duplicates one already
 * outstanding on this file (spec compliance: no duplicate requests or
 * responses on the same port).  Caller must hold file->send_lock.
 * Returns 1 if the packet should be rejected as a duplicate.
 */
static int is_duplicate(struct ib_umad_file *file,
			struct ib_umad_packet *packet)
{
	struct ib_umad_packet *sent_packet;
	struct ib_mad_hdr *sent_hdr, *hdr;

	hdr = (struct ib_mad_hdr *) packet->mad.data;
	list_for_each_entry(sent_packet, &file->send_list, list) {
		int new_is_resp, sent_is_resp;

		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;
		if (hdr->tid != sent_hdr->tid ||
		    hdr->mgmt_class != sent_hdr->mgmt_class)
			continue;

		new_is_resp = ib_response_mad((struct ib_mad *) hdr);
		sent_is_resp = ib_response_mad((struct ib_mad *) sent_hdr);

		/*
		 * No need to be overly clever here.  Two new operations
		 * sharing a TID are rejected outright, which is more
		 * restrictive than the spec requires.
		 */
		if (!new_is_resp && !sent_is_resp)
			return 1;

		/* A request and a response never conflict with each other. */
		if (new_is_resp != sent_is_resp)
			continue;

		/* Both responses: duplicate only if aimed at one destination. */
		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
			return 1;
	}

	return 0;
}
static ssize_t ib_umad_write(struct file *filp, const char __user *buf, static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos) size_t count, loff_t *pos)
{ {
...@@ -379,7 +435,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, ...@@ -379,7 +435,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
struct ib_ah_attr ah_attr; struct ib_ah_attr ah_attr;
struct ib_ah *ah; struct ib_ah *ah;
struct ib_rmpp_mad *rmpp_mad; struct ib_rmpp_mad *rmpp_mad;
u8 method;
__be64 *tid; __be64 *tid;
int ret, data_len, hdr_len, copy_offset, rmpp_active; int ret, data_len, hdr_len, copy_offset, rmpp_active;
...@@ -473,28 +528,36 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, ...@@ -473,28 +528,36 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
} }
/* /*
* If userspace is generating a request that will generate a * Set the high-order part of the transaction ID to make MADs from
* response, we need to make sure the high-order part of the * different agents unique, and allow routing responses back to the
* transaction ID matches the agent being used to send the * original requestor.
* MAD.
*/ */
method = ((struct ib_mad_hdr *) packet->msg->mad)->method; if (!ib_response_mad(packet->msg->mad)) {
if (!(method & IB_MGMT_METHOD_RESP) &&
method != IB_MGMT_METHOD_TRAP_REPRESS &&
method != IB_MGMT_METHOD_SEND) {
tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid; tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
(be64_to_cpup(tid) & 0xffffffff)); (be64_to_cpup(tid) & 0xffffffff));
rmpp_mad->mad_hdr.tid = *tid;
}
spin_lock_irq(&file->send_lock);
ret = is_duplicate(file, packet);
if (!ret)
list_add_tail(&packet->list, &file->send_list);
spin_unlock_irq(&file->send_lock);
if (ret) {
ret = -EINVAL;
goto err_msg;
} }
ret = ib_post_send_mad(packet->msg, NULL); ret = ib_post_send_mad(packet->msg, NULL);
if (ret) if (ret)
goto err_msg; goto err_send;
up_read(&file->port->mutex); up_read(&file->port->mutex);
return count; return count;
err_send:
dequeue_send(file, packet);
err_msg: err_msg:
ib_free_send_mad(packet->msg); ib_free_send_mad(packet->msg);
err_ah: err_ah:
...@@ -657,7 +720,9 @@ static int ib_umad_open(struct inode *inode, struct file *filp) ...@@ -657,7 +720,9 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
} }
spin_lock_init(&file->recv_lock); spin_lock_init(&file->recv_lock);
spin_lock_init(&file->send_lock);
INIT_LIST_HEAD(&file->recv_list); INIT_LIST_HEAD(&file->recv_list);
INIT_LIST_HEAD(&file->send_list);
init_waitqueue_head(&file->recv_wait); init_waitqueue_head(&file->recv_wait);
file->port = port; file->port = port;
......
...@@ -75,6 +75,7 @@ ...@@ -75,6 +75,7 @@
#define IB_MGMT_METHOD_TRAP_REPRESS 0x07 #define IB_MGMT_METHOD_TRAP_REPRESS 0x07
#define IB_MGMT_METHOD_RESP 0x80 #define IB_MGMT_METHOD_RESP 0x80
#define IB_BM_ATTR_MOD_RESP cpu_to_be32(1)
#define IB_MGMT_MAX_METHODS 128 #define IB_MGMT_MAX_METHODS 128
...@@ -246,6 +247,12 @@ struct ib_mad_send_buf { ...@@ -246,6 +247,12 @@ struct ib_mad_send_buf {
int retries; int retries;
}; };
/**
* ib_response_mad - Returns if the specified MAD has been generated in
* response to a sent request or trap.
*/
int ib_response_mad(struct ib_mad *mad);
/** /**
* ib_get_rmpp_resptime - Returns the RMPP response time. * ib_get_rmpp_resptime - Returns the RMPP response time.
* @rmpp_hdr: An RMPP header. * @rmpp_hdr: An RMPP header.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment