Commit 28a9a9e8 authored by Michael J. Ruhl, committed by Jason Gunthorpe

IB/hfi1: Remove race conditions in user_sdma send path

Packet queue state is overloaded: it is used to determine both SDMA
descriptor availability and packet queue request state.

cpu 0  ret = user_sdma_send_pkts(req, pcount);
cpu 0  if (atomic_read(&pq->n_reqs))
cpu 1  IRQ user_sdma_txreq_cb calls pq_update() (state to _INACTIVE)
cpu 0        xchg(&pq->state, SDMA_PKT_Q_ACTIVE);

At this point pq->n_reqs == 0 and pq->state is incorrectly
SDMA_PKT_Q_ACTIVE.  The close path will hang waiting for the state
to return to _INACTIVE.

This can also change the state from _DEFERRED to _ACTIVE.  However,
this is a mostly benign race.

Remove the racy code path.

Use n_reqs to determine if a packet queue is active or not.
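
To make the window concrete, here is a hypothetical user-space model of
the interleaving above. C11 atomics and a pthread stand in for the
driver's atomics and IRQ context, and the bad interleaving is forced
deterministically with a join; none of this is the driver code itself.

/* Hypothetical user-space model of the race; not the driver code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define SDMA_PKT_Q_INACTIVE 1          /* mirrors the old driver flags */
#define SDMA_PKT_Q_ACTIVE   2

static atomic_int n_reqs = 1;          /* one request in flight */
static atomic_int state  = SDMA_PKT_Q_ACTIVE;

/* "cpu 1": completion callback finishing the last request (pq_update()) */
static void *completion_irq(void *arg)
{
	(void)arg;
	if (atomic_fetch_sub(&n_reqs, 1) == 1)  /* atomic_dec_and_test() */
		atomic_store(&state, SDMA_PKT_Q_INACTIVE);
	return NULL;
}

int main(void)
{
	pthread_t irq;

	/* "cpu 0": the racy submit path */
	if (atomic_load(&n_reqs)) {             /* reads 1: request still live */
		/* the completion can run entirely inside this window ... */
		pthread_create(&irq, NULL, completion_irq, NULL);
		pthread_join(irq, NULL);
		/* ... so this overwrites _INACTIVE with _ACTIVE */
		atomic_store(&state, SDMA_PKT_Q_ACTIVE);
	}

	/* the close path would now sleep forever waiting for _INACTIVE */
	printf("n_reqs=%d, state=%s\n", atomic_load(&n_reqs),
	       atomic_load(&state) == SDMA_PKT_Q_ACTIVE ? "ACTIVE" : "INACTIVE");
	return 0;
}

Built with cc -pthread, this prints "n_reqs=0, state=ACTIVE": no requests
remain, yet the queue still claims to be active.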
Reviewed-by: Mitko Haralanov <mitko.haralanov@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent a0e0cb82
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -187,7 +187,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
 	pq->ctxt = uctxt->ctxt;
 	pq->subctxt = fd->subctxt;
 	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
-	pq->state = SDMA_PKT_Q_INACTIVE;
 	atomic_set(&pq->n_reqs, 0);
 	init_waitqueue_head(&pq->wait);
 	atomic_set(&pq->n_locked, 0);
@@ -276,7 +275,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
 	/* Wait until all requests have been freed. */
 	wait_event_interruptible(
 		pq->wait,
-		(READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+		!atomic_read(&pq->n_reqs));
 	kfree(pq->reqs);
 	kfree(pq->req_in_use);
 	kmem_cache_destroy(pq->txreq_cache);
@@ -312,6 +311,13 @@ static u8 dlid_to_selector(u16 dlid)
 	return mapping[hash];
 }
 
+/**
+ * hfi1_user_sdma_process_request() - Process and start a user sdma request
+ * @fd: valid file descriptor
+ * @iovec: array of io vectors to process
+ * @dim: overall iovec array size
+ * @count: number of io vector array entries processed
+ */
 int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
 				   struct iovec *iovec, unsigned long dim,
 				   unsigned long *count)
@@ -560,20 +566,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
 		req->ahg_idx = sdma_ahg_alloc(req->sde);
 
 	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
+	pq->state = SDMA_PKT_Q_ACTIVE;
 	/* Send the first N packets in the request to buy us some time */
 	ret = user_sdma_send_pkts(req, pcount);
 	if (unlikely(ret < 0 && ret != -EBUSY))
 		goto free_req;
 
-	/*
-	 * It is possible that the SDMA engine would have processed all the
-	 * submitted packets by the time we get here. Therefore, only set
-	 * packet queue state to ACTIVE if there are still uncompleted
-	 * requests.
-	 */
-	if (atomic_read(&pq->n_reqs))
-		xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
-
 	/*
 	 * This is a somewhat blocking send implementation.
 	 * The driver will block the caller until all packets of the
@@ -1409,10 +1407,8 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
 
 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
 {
-	if (atomic_dec_and_test(&pq->n_reqs)) {
-		xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
+	if (atomic_dec_and_test(&pq->n_reqs))
 		wake_up(&pq->wait);
-	}
 }
 
 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
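
The fix replaces the state handshake with a single counter: the
completion side decrements and wakes only for the final request, and the
close path sleeps until the count reaches zero. Below is a hypothetical
user-space model of that pairing, with a pthread condition variable
standing in for wait_event()/wake_up(); the names mirror the driver but
this is not the driver code.

/* Hypothetical model of the fixed wait/wake pairing; not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock    = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_cv = PTHREAD_COND_INITIALIZER; /* models pq->wait */
static int n_reqs = 3;                                     /* models pq->n_reqs */

/* models pq_update(): called once per completed request */
static void pq_update(void)
{
	pthread_mutex_lock(&lock);
	if (--n_reqs == 0)                      /* atomic_dec_and_test() */
		pthread_cond_broadcast(&wait_cv);   /* wake_up(&pq->wait) */
	pthread_mutex_unlock(&lock);
}

static void *completer(void *arg)
{
	(void)arg;
	pq_update();
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	int i;

	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, completer, NULL);

	/* models the close path: wait_event(pq->wait, !atomic_read(&pq->n_reqs)) */
	pthread_mutex_lock(&lock);
	while (n_reqs)
		pthread_cond_wait(&wait_cv, &lock);
	pthread_mutex_unlock(&lock);

	printf("all requests freed, safe to tear down\n");
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Because the predicate the waiter checks is the very counter the
completer decrements, there is no separate state word that can fall out
of sync with it.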
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -105,9 +105,10 @@ static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
 #define TXREQ_FLAGS_REQ_ACK BIT(0)        /* Set the ACK bit in the header */
 #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
 
-#define SDMA_PKT_Q_INACTIVE BIT(0)
-#define SDMA_PKT_Q_ACTIVE   BIT(1)
-#define SDMA_PKT_Q_DEFERRED BIT(2)
+enum pkt_q_sdma_state {
+	SDMA_PKT_Q_ACTIVE,
+	SDMA_PKT_Q_DEFERRED,
+};
 
 /*
  * Maximum retry attempts to submit a TX request
@@ -133,7 +134,7 @@ struct hfi1_user_sdma_pkt_q {
 	struct user_sdma_request *reqs;
 	unsigned long *req_in_use;
 	struct iowait busy;
-	unsigned state;
+	enum pkt_q_sdma_state state;
 	wait_queue_head_t wait;
 	unsigned long unpinned;
 	struct mmu_rb_handler *handler;