Commit 24acfc34 authored by Tejun Heo, committed by Jens Axboe

block: interface update for ioc/icq creation functions

Make the following interface updates to prepare for future ioc related
changes.

* create_io_context() returning ioc only works for %current because it
  doesn't increment ref on the ioc.  Drop @task parameter from it and
  always assume %current.

* Make create_io_context_slowpath() return 0 or -errno and rename it
  to create_task_io_context().

* Make ioc_create_icq() take @ioc as parameter instead of assuming
  that of %current.  The caller, get_request(), is updated to create
  ioc explicitly and then pass it into ioc_create_icq().

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b679281a
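
Taken together, the new calling convention looks roughly like the sketch below. This is a minimal illustration based on the updated get_request() path in the diff that follows, not code from the commit; example_prepare_icq() is a hypothetical helper name used only for this example.

static struct io_cq *example_prepare_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct io_context *ioc;
	struct io_cq *icq = NULL;

	/* create_io_context() now always acts on %current and takes no @task */
	ioc = create_io_context(gfp_mask, q->node);

	/* ioc_create_icq() no longer creates the ioc itself; the caller passes it in */
	if (ioc)
		icq = ioc_create_icq(ioc, q, gfp_mask);

	return icq;	/* NULL if either allocation failed */
}

On the slow path, create_task_io_context() now reports failure with -ENOMEM instead of returning void, which is why get_task_io_context() in the diff loops on !create_task_io_context(...).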
@@ -855,7 +855,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		 */
 		if (!ioc && !retried) {
 			spin_unlock_irq(q->queue_lock);
-			create_io_context(current, gfp_mask, q->node);
+			create_io_context(gfp_mask, q->node);
 			spin_lock_irq(q->queue_lock);
 			retried = true;
 			goto retry;
@@ -919,7 +919,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 
 	/* create icq if missing */
 	if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
-		icq = ioc_create_icq(q, gfp_mask);
+		ioc = create_io_context(gfp_mask, q->node);
+		if (ioc)
+			icq = ioc_create_icq(ioc, q, gfp_mask);
 		if (!icq)
			goto fail_alloc;
 	}
@@ -1005,7 +1007,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		 * up to a big batch of them for a small period time.
 		 * See ioc_batching, ioc_set_batching
 		 */
-		create_io_context(current, GFP_NOIO, q->node);
+		create_io_context(GFP_NOIO, q->node);
 		ioc_set_batching(q, current->io_context);
 
 		spin_lock_irq(q->queue_lock);
...
@@ -212,15 +212,14 @@ void ioc_clear_queue(struct request_queue *q)
 	}
 }
 
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
-				int node)
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 {
 	struct io_context *ioc;
 
 	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
 				    node);
 	if (unlikely(!ioc))
-		return;
+		return -ENOMEM;
 
 	/* initialize */
 	atomic_long_set(&ioc->refcount, 1);
@@ -244,6 +243,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
 	else
 		kmem_cache_free(iocontext_cachep, ioc);
 	task_unlock(task);
+
+	return 0;
 }
 
 /**
@@ -275,7 +276,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
 			return ioc;
 		}
 		task_unlock(task);
-	} while (create_io_context(task, gfp_flags, node));
+	} while (!create_task_io_context(task, gfp_flags, node));
 
 	return NULL;
 }
@@ -319,26 +320,23 @@ EXPORT_SYMBOL(ioc_lookup_icq);
 
 /**
  * ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
  * @q: request_queue of interest
  * @gfp_mask: allocation mask
  *
- * Make sure io_cq linking %current->io_context and @q exists.  If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, they
+ * will be created using @gfp_mask.
  *
  * The caller is responsible for ensuring @ioc won't go away and @q is
  * alive and will stay alive until this function returns.
  */
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask)
 {
 	struct elevator_type *et = q->elevator->type;
-	struct io_context *ioc;
 	struct io_cq *icq;
 
 	/* allocate stuff */
-	ioc = create_io_context(current, gfp_mask, q->node);
-	if (!ioc)
-		return NULL;
-
 	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
 				    q->node);
 	if (!icq)
...
@@ -200,32 +200,30 @@ static inline int blk_do_io_stat(struct request *rq)
  */
 void get_io_context(struct io_context *ioc);
 struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask);
 void ioc_clear_queue(struct request_queue *q);
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
-				int node);
+int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
 
 /**
  * create_io_context - try to create task->io_context
- * @task: target task
  * @gfp_mask: allocation mask
  * @node: allocation node
  *
- * If @task->io_context is %NULL, allocate a new io_context and install it.
- * Returns the current @task->io_context which may be %NULL if allocation
- * failed.
+ * If %current->io_context is %NULL, allocate a new io_context and install
+ * it.  Returns the current %current->io_context which may be %NULL if
+ * allocation failed.
  *
  * Note that this function can't be called with IRQ disabled because
- * task_lock which protects @task->io_context is IRQ-unsafe.
+ * task_lock which protects %current->io_context is IRQ-unsafe.
  */
-static inline struct io_context *create_io_context(struct task_struct *task,
-						    gfp_t gfp_mask, int node)
+static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
 {
 	WARN_ON_ONCE(irqs_disabled());
-	if (unlikely(!task->io_context))
-		create_io_context_slowpath(task, gfp_mask, node);
-	return task->io_context;
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, gfp_mask, node);
+	return current->io_context;
 }
 
 /*
...