Commit 998a5a63 authored by Sean Hefty, committed by Linus Torvalds

[PATCH] IB: MAD cancel callbacks from thread

Modify ib_cancel_mad() to invoke a user's send completion callback from
a different thread context than that used by the caller.  This allows a
caller to hold a lock while calling cancel that is also acquired from
their send handler.
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 3af2e092
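For context, the self-deadlock this change avoids looks roughly like the sketch below. It is a hypothetical MAD client (the my_* names and the request list are illustrative, not part of the patch or of the MAD API): the client takes its own spinlock both in its send handler and around ib_cancel_mad(). If the cancel path invoked the send handler synchronously, the handler would spin on a lock the canceling thread already holds; with this patch the completion is queued to the port workqueue, so the handler runs only after the caller has released its lock.

/*
 * Hypothetical MAD client sketch -- the my_* names are illustrative
 * and are not part of the kernel MAD API or of this patch.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include "ib_mad.h"		/* MAD API header of this kernel era */

struct my_request {
	struct list_head list;
	u64		 wr_id;
};

static DEFINE_SPINLOCK(my_lock);	/* protects my_request_list */
static LIST_HEAD(my_request_list);

/* Send completion handler: also invoked for canceled or timed-out sends. */
static void my_send_handler(struct ib_mad_agent *agent,
			    struct ib_mad_send_wc *wc)
{
	struct my_request *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);	/* same lock as in my_cancel_all() */
	list_for_each_entry_safe(req, tmp, &my_request_list, list) {
		if (req->wr_id == wc->wr_id) {
			list_del(&req->list);
			kfree(req);
			break;
		}
	}
	spin_unlock_irqrestore(&my_lock, flags);
}

/* Cancel every outstanding request while holding my_lock. */
static void my_cancel_all(struct ib_mad_agent *agent)
{
	struct my_request *req;
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	list_for_each_entry(req, &my_request_list, list) {
		/*
		 * If ib_cancel_mad() called my_send_handler() directly,
		 * the handler would spin on my_lock, which this thread
		 * already holds -- deadlock.  With this patch the
		 * completion is handed to the port workqueue, so
		 * my_send_handler() runs only after my_lock is dropped.
		 */
		ib_cancel_mad(agent, req->wr_id);
	}
	spin_unlock_irqrestore(&my_lock, flags);
}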
...@@ -68,6 +68,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
				    struct ib_mad_send_wc *mad_send_wc);
static void timeout_sends(void *data);
static void cancel_sends(void *data);
static void local_completions(void *data);
static int solicited_mad(struct ib_mad *mad);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
...@@ -341,6 +342,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions,
		  mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->canceled_list);
	INIT_WORK(&mad_agent_priv->canceled_work, cancel_sends, mad_agent_priv);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_waitqueue_head(&mad_agent_priv->wait);
...@@ -2004,12 +2007,44 @@ find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
	return NULL;
}
void cancel_sends(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	mad_agent_priv = data;

	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->canceled_list)) {
		mad_send_wr = list_entry(mad_agent_priv->canceled_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		list_del(&mad_send_wr->agent_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		mad_send_wc.wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		kfree(mad_send_wr);
		if (atomic_dec_and_test(&mad_agent_priv->refcount))
			wake_up(&mad_agent_priv->wait);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   u64 wr_id)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
...@@ -2031,19 +2066,12 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent,
	}
	list_del(&mad_send_wr->agent_list);
	list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->canceled_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;
	mad_send_wc.wr_id = mad_send_wr->wr_id;
	mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
					   &mad_send_wc);
	kfree(mad_send_wr);
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);

	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->canceled_work);
out:
	return;
}
...
...@@ -95,6 +95,8 @@ struct ib_mad_agent_private {
	unsigned long timeout;
	struct list_head local_list;
	struct work_struct local_work;
	struct list_head canceled_list;
	struct work_struct canceled_work;
	atomic_t refcount;
	wait_queue_head_t wait;
...