Commit 12f5b931 authored by Keith Busch, committed by Jens Axboe

blk-mq: Remove generation sequence

This patch simplifies the timeout handling by relying on the request
reference counting to ensure the iterator is operating on an inflight
and truly timed out request. Since the reference counting prevents the
tag from being reallocated, the block layer no longer needs to prevent
drivers from completing their requests while the timeout handler is
operating on them: a driver completing a request may proceed to the
next state without additional synchronization with the block layer.

This also removes any need for generation sequence numbers, since a
request can no longer be reallocated and recycled as a new instance
while the timeout handler is operating on it.

To enable this, a refcount is added to struct request so that request
users can be sure they are operating on the same request without it
changing while they process it. The request's tag is not released for
reuse until both the timeout handler and the completion are done with
it.
Signed-off-by: Keith Busch <keith.busch@intel.com>
[hch: slight cleanups, added back submission side hctx lock, use cmpxchg
 for completions]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ad103e79
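
The block/blk-mq.c changes that implement most of this are in the collapsed diff below. As a rough sketch of the idea (an assumption about the shape of the code, not the commit's literal implementation): once the request state is a single MQ_RQ_* word, a completion can claim a request with one cmpxchg and no generation bookkeeping. The helper name example_claim_for_completion is hypothetical.

#include <linux/atomic.h>
#include <linux/blkdev.h>

/*
 * Illustrative sketch only: whoever moves the request out of
 * MQ_RQ_IN_FLIGHT first owns the completion.  A concurrent timeout
 * handler (or a second completion) loses the cmpxchg and simply backs
 * off, so no seqcount or generation number is needed to detect the race.
 */
static bool example_claim_for_completion(struct request *rq)
{
	return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
	       MQ_RQ_IN_FLIGHT;
}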
block/blk-core.c:

@@ -198,12 +198,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->internal_tag = -1;
 	rq->start_time_ns = ktime_get_ns();
 	rq->part = NULL;
-	seqcount_init(&rq->gstate_seq);
-	u64_stats_init(&rq->aborted_gstate_sync);
-	/*
-	 * See comment of blk_mq_init_request
-	 */
-	WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
 }
 EXPORT_SYMBOL(blk_rq_init);
block/blk-mq-debugfs.c:

@@ -344,7 +344,6 @@ static const char *const rqf_name[] = {
 	RQF_NAME(STATS),
 	RQF_NAME(SPECIAL_PAYLOAD),
 	RQF_NAME(ZONE_WRITE_LOCKED),
-	RQF_NAME(MQ_TIMEOUT_EXPIRED),
 	RQF_NAME(MQ_POLL_SLEPT),
 };
 #undef RQF_NAME
The diff for block/blk-mq.c is collapsed and not shown here.
block/blk-mq.h:

@@ -30,20 +30,6 @@ struct blk_mq_ctx {
 	struct kobject		kobj;
 } ____cacheline_aligned_in_smp;
 
-/*
- * Bits for request->gstate. The lower two bits carry MQ_RQ_* state value
- * and the upper bits the generation number.
- */
-enum mq_rq_state {
-	MQ_RQ_IDLE		= 0,
-	MQ_RQ_IN_FLIGHT		= 1,
-	MQ_RQ_COMPLETE		= 2,
-
-	MQ_RQ_STATE_BITS	= 2,
-	MQ_RQ_STATE_MASK	= (1 << MQ_RQ_STATE_BITS) - 1,
-	MQ_RQ_GEN_INC		= 1 << MQ_RQ_STATE_BITS,
-};
-
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
@@ -107,33 +93,9 @@ void blk_mq_release(struct request_queue *q);
  * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
  * @rq: target request.
  */
-static inline int blk_mq_rq_state(struct request *rq)
+static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
 {
-	return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
-}
-
-/**
- * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
- * @rq: target request.
- * @state: new state to set.
- *
- * Set @rq's state to @state. The caller is responsible for ensuring that
- * there are no other updaters. A request can transition into IN_FLIGHT
- * only from IDLE and doing so increments the generation number.
- */
-static inline void blk_mq_rq_update_state(struct request *rq,
-					  enum mq_rq_state state)
-{
-	u64 old_val = READ_ONCE(rq->gstate);
-	u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;
-
-	if (state == MQ_RQ_IN_FLIGHT) {
-		WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
-		new_val += MQ_RQ_GEN_INC;
-	}
-
-	/* avoid exposing interim values */
-	WRITE_ONCE(rq->gstate, new_val);
+	return READ_ONCE(rq->state);
 }
 
 static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
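
With blk_mq_rq_state() reduced to a READ_ONCE() of ->state, deciding whether a request has really timed out becomes a plain state-plus-deadline test. The sketch below is an assumption about how such a test can look, not the commit's code (the real check lives in the collapsed block/blk-mq.c diff); example_req_expired is a hypothetical name, and blk_rq_deadline() is the block-internal accessor the blkdev.h hunk further down refers to.

#include <linux/jiffies.h>
#include <linux/blkdev.h>
#include "blk.h"	/* block-internal header: blk_rq_deadline() (assumed location) */
#include "blk-mq.h"	/* block-internal header: blk_mq_rq_state() */

/*
 * Sketch: only a request that is still marked in-flight and whose
 * deadline has passed is worth handing to the driver's ->timeout().
 */
static bool example_req_expired(struct request *rq, unsigned long now)
{
	return blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT &&
	       time_after_eq(now, blk_rq_deadline(rq));
}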
block/blk-timeout.c:

@@ -214,7 +214,6 @@ void blk_add_timer(struct request *req)
 		req->timeout = q->rq_timeout;
 
 	blk_rq_set_deadline(req, jiffies + req->timeout);
-	req->rq_flags &= ~RQF_MQ_TIMEOUT_EXPIRED;
 
 	/*
 	 * Only the non-mq case needs to add the request to a protected list.
include/linux/blkdev.h:

@@ -125,15 +125,22 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
 /* The per-zone write lock is held for this request */
 #define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
-/* timeout is expired */
-#define RQF_MQ_TIMEOUT_EXPIRED	((__force req_flags_t)(1 << 20))
 /* already slept for hybrid poll */
-#define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 21))
+#define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
 
 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \
 	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
 
+/*
+ * Request state for blk-mq.
+ */
+enum mq_rq_state {
+	MQ_RQ_IDLE		= 0,
+	MQ_RQ_IN_FLIGHT		= 1,
+	MQ_RQ_COMPLETE		= 2,
+};
+
 /*
  * Try to put the fields that are referenced together in the same cacheline.
  *
@@ -236,26 +243,8 @@ struct request {
 	unsigned int extra_len;	/* length of alignment and padding */
 
-	/*
-	 * On blk-mq, the lower bits of ->gstate (generation number and
-	 * state) carry the MQ_RQ_* state value and the upper bits the
-	 * generation number which is monotonically incremented and used to
-	 * distinguish the reuse instances.
-	 *
-	 * ->gstate_seq allows updates to ->gstate and other fields
-	 * (currently ->deadline) during request start to be read
-	 * atomically from the timeout path, so that it can operate on a
-	 * coherent set of information.
-	 */
-	seqcount_t gstate_seq;
-	u64 gstate;
-
-	/*
-	 * ->aborted_gstate is used by the timeout to claim a specific
-	 * recycle instance of this request. See blk_mq_timeout_work().
-	 */
-	struct u64_stats_sync aborted_gstate_sync;
-	u64 aborted_gstate;
+	enum mq_rq_state state;
+	refcount_t ref;
 
 	/* access through blk_rq_set_deadline, blk_rq_deadline */
 	unsigned long __deadline;
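
These two new struct request fields carry the whole scheme: ->state records where the request is in its life cycle and ->ref pins its tag. Below is a hedged sketch of the lifetime rule the commit message describes; example_timeout_check() and example_release_tag() are hypothetical stand-ins for blk-mq's internal timeout-iterator callback and tag-releasing free path, which live in the collapsed block/blk-mq.c diff.

#include <linux/refcount.h>
#include <linux/blkdev.h>
#include "blk-mq.h"	/* block-internal header: blk_mq_rq_state() */

/* Hypothetical stand-in for blk-mq's internal tag-releasing free path. */
static void example_release_tag(struct request *rq)
{
}

static void example_timeout_check(struct request *rq)
{
	/*
	 * Pin the request first.  If the reference is already gone, the
	 * driver's completion won the race and the tag may be recycled,
	 * so there is nothing left to time out.
	 */
	if (!refcount_inc_not_zero(&rq->ref))
		return;

	if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) {
		/* ... hand the request to the driver's ->timeout() here ... */
	}

	/*
	 * The tag is released only after both the completion path and the
	 * timeout handler have dropped their references.
	 */
	if (refcount_dec_and_test(&rq->ref))
		example_release_tag(rq);
}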