Commit 2fabef4f authored by Ka-Cheong Poon, committed by David S. Miller

net/rds: Fix MR reference counting problem

In rds_free_mr(), rds_destroy_mr(mr) is called directly.  But this
defeats the purpose of reference counting and makes proper MR freeing
impossible: holding a reference no longer guarantees that it is safe
to access some fields of the MR.  For example, in rds_cmsg_rdma_dest(),
the ref count is increased, the lock is dropped and then
mr->r_trans->sync_mr() is called.  If rds_free_mr() (and with it
rds_destroy_mr()) is called in between (there is no lock preventing
this from happening), r_trans_private is set to NULL, causing a panic.
A similar issue exists in rds_rdma_unuse().
Reported-by: zerons <sironhide0null@gmail.com>
Signed-off-by: Ka-Cheong Poon <ka-cheong.poon@oracle.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e228a5d0
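
To make the lifetime rule concrete, here is a minimal user-space sketch of the discipline the patch enforces. It is illustrative only: struct mr, mr_get()/mr_put() and trans_private stand in for struct rds_mr, kref_get()/kref_put() and r_trans_private. The point is that all teardown happens only when the last reference is dropped, so a pinned reference keeps every field valid; the free path merely drops its own reference instead of destroying the object in place.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct rds_mr: the object stays fully
 * usable until the last reference is dropped. */
struct mr {
        atomic_int refcount;
        void *trans_private;            /* stands in for mr->r_trans_private */
};

static struct mr *mr_alloc(void)
{
        struct mr *mr = malloc(sizeof(*mr));

        atomic_init(&mr->refcount, 1);  /* creator's reference */
        mr->trans_private = malloc(64); /* some per-transport state */
        return mr;
}

static void mr_get(struct mr *mr)
{
        atomic_fetch_add(&mr->refcount, 1);
}

/* All teardown lives here, so trans_private stays valid for as long
 * as any caller still holds a reference. */
static void mr_put(struct mr *mr)
{
        if (atomic_fetch_sub(&mr->refcount, 1) == 1) {
                free(mr->trans_private);
                free(mr);
        }
}

int main(void)
{
        struct mr *mr = mr_alloc();

        mr_get(mr);     /* a user such as rds_cmsg_rdma_dest() pins the MR */
        mr_put(mr);     /* the free path only drops its own reference */
        printf("still safe: %p\n", mr->trans_private); /* pinned ref keeps it valid */
        mr_put(mr);     /* last reference: teardown happens now */
        return 0;
}

With this discipline, the sequence from the commit message (pin, unlock, sync_mr(), unpin) can never observe a NULL r_trans_private, because the field is only torn down in the release path.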
@@ -101,9 +101,6 @@ static void rds_destroy_mr(struct rds_mr *mr)
 	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
 		 mr->r_key, kref_read(&mr->r_kref));
 
-	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
-		return;
-
 	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 	if (!RB_EMPTY_NODE(&mr->r_rb_node))
 		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
@@ -142,7 +139,6 @@ void rds_rdma_drop_keys(struct rds_sock *rs)
 		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
 		RB_CLEAR_NODE(&mr->r_rb_node);
 		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
-		rds_destroy_mr(mr);
 		kref_put(&mr->r_kref, __rds_put_mr_final);
 		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 	}
@@ -436,12 +432,6 @@ int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
 	if (!mr)
 		return -EINVAL;
 
-	/*
-	 * call rds_destroy_mr() ourselves so that we're sure it's done by the time
-	 * we return. If we let rds_mr_put() do it it might not happen until
-	 * someone else drops their ref.
-	 */
-	rds_destroy_mr(mr);
 	kref_put(&mr->r_kref, __rds_put_mr_final);
 	return 0;
 }
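
With the direct rds_destroy_mr() call removed, rds_free_mr() now follows the standard kref idiom: the caller only drops its reference, and the release callback performs the actual teardown exactly once, when the count reaches zero. Below is a hedged, generic sketch of that idiom; the names obj, obj_release() and obj_free() are illustrative, and the real contents of __rds_put_mr_final() are not shown in this diff.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
        struct kref kref;
        void *private;                  /* analogous to mr->r_trans_private */
};

/* Invoked by kref_put() exactly once, when the last reference drops. */
static void obj_release(struct kref *kref)
{
        struct obj *obj = container_of(kref, struct obj, kref);

        kfree(obj->private);            /* safe: no references remain */
        kfree(obj);
}

static void obj_free(struct obj *obj)
{
        /* Like the patched rds_free_mr(): drop our reference only.
         * Teardown runs in obj_release() if this was the last one. */
        kref_put(&obj->kref, obj_release);
}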
@@ -466,6 +456,14 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
 		return;
 	}
 
+	/* Get a reference so that the MR won't go away before calling
+	 * sync_mr() below.
+	 */
+	kref_get(&mr->r_kref);
+
+	/* If it is going to be freed, remove it from the tree now so
+	 * that no other thread can find it and free it.
+	 */
 	if (mr->r_use_once || force) {
 		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
 		RB_CLEAR_NODE(&mr->r_rb_node);
@@ -479,12 +477,13 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
 	if (mr->r_trans->sync_mr)
 		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
 
+	/* Release the reference held above. */
+	kref_put(&mr->r_kref, __rds_put_mr_final);
+
 	/* If the MR was marked as invalidate, this will
 	 * trigger an async flush. */
-	if (zot_me) {
-		rds_destroy_mr(mr);
-		kref_put(&mr->r_kref, __rds_put_mr_final);
-	}
+	if (zot_me)
+		kref_put(&mr->r_kref, __rds_put_mr_final);
 }
 
 void rds_rdma_free_op(struct rm_rdma_op *ro)
......
@@ -299,19 +299,11 @@ struct rds_mr {
 	unsigned int		r_invalidate:1;
 	unsigned int		r_write:1;
 
-	/* This is for RDS_MR_DEAD.
-	 * It would be nice & consistent to make this part of the above
-	 * bit field here, but we need to use test_and_set_bit.
-	 */
-	unsigned long		r_state;
 	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
 	struct rds_transport	*r_trans;
 	void			*r_trans_private;
 };
 
-/* Flags for mr->r_state */
-#define RDS_MR_DEAD		0
-
 static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
 {
 	return r_key | (((u64) offset) << 32);
......
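
The comment removed above also documents why r_state had to be a separate unsigned long rather than one more bit in the preceding bitfield: the atomic bit operations work on a whole machine word. A small illustrative sketch of that constraint follows (OBJ_DEAD, struct obj and obj_mark_dead() are hypothetical names); with destruction now funneled through the kref release callback, the RDS_MR_DEAD flag and its word are simply no longer needed.

#include <linux/bitops.h>

#define OBJ_DEAD	0		/* bit number, like the removed RDS_MR_DEAD */

struct obj {
        unsigned int	some_flag:1;	/* fine for flags updated under a lock */
        unsigned long	state;		/* test_and_set_bit() needs a whole word */
};

static int obj_mark_dead(struct obj *obj)
{
        /* Atomically set the bit and report whether it was already set,
         * so only the first caller proceeds with teardown. */
        return test_and_set_bit(OBJ_DEAD, &obj->state);
}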