Commit d9c7d394 authored by Nikanth Karthikesan, committed by Jens Axboe

block: prevent possible io_context->refcount overflow

Currently io_context has an atomic_t (32-bit) as its refcount.  In the case of
cfq, a reference to the io_context is taken for each device against which a
task does I/O.  In addition, each of the processes sharing an io_context
(CLONE_IO) holds a reference to the same io_context.

Theoretically, the maximum number of processes sharing the same io_context
plus the number of disks/cfq_data referring to the same io_context could
overflow the 32-bit counter on a very high-end machine.

Even though it is an improbable case, let us make it atomic_long_t.
Signed-off-by: Nikanth Karthikesan <knikanth@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 1d589bb1
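
As a quick sanity check on the headroom this change buys (not part of the
patch; a user-space sketch assuming a 64-bit kernel, where atomic_t wraps a
32-bit int and atomic_long_t a native long):

/* Print the ceilings of the two counter widths discussed above. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	printf("32-bit atomic_t ceiling (INT_MAX):        %d\n", INT_MAX);   /* ~2.1e9 references */
	printf("atomic_long_t ceiling (LONG_MAX, 64-bit): %ld\n", LONG_MAX); /* ~9.2e18 references */
	return 0;
}

In other words, the counter only wraps once roughly 2^31 holders exist at the
same time, which is what the changelog calls improbable but theoretically
possible.
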
@@ -35,9 +35,9 @@ int put_io_context(struct io_context *ioc)
 	if (ioc == NULL)
 		return 1;
 
-	BUG_ON(atomic_read(&ioc->refcount) == 0);
+	BUG_ON(atomic_long_read(&ioc->refcount) == 0);
 
-	if (atomic_dec_and_test(&ioc->refcount)) {
+	if (atomic_long_dec_and_test(&ioc->refcount)) {
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
@@ -90,7 +90,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 
 	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 	if (ret) {
-		atomic_set(&ret->refcount, 1);
+		atomic_long_set(&ret->refcount, 1);
 		atomic_set(&ret->nr_tasks, 1);
 		spin_lock_init(&ret->lock);
 		ret->ioprio_changed = 0;
@@ -151,7 +151,7 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
 		ret = current_io_context(gfp_flags, node);
 		if (unlikely(!ret))
 			break;
-	} while (!atomic_inc_not_zero(&ret->refcount));
+	} while (!atomic_long_inc_not_zero(&ret->refcount));
 
 	return ret;
 }
@@ -163,8 +163,8 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
 	struct io_context *dst = *pdst;
 
 	if (src) {
-		BUG_ON(atomic_read(&src->refcount) == 0);
-		atomic_inc(&src->refcount);
+		BUG_ON(atomic_long_read(&src->refcount) == 0);
+		atomic_long_inc(&src->refcount);
 		put_io_context(dst);
 		*pdst = src;
 	}
...
@@ -1282,7 +1282,7 @@ static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	if (!cfqd->active_cic) {
 		struct cfq_io_context *cic = RQ_CIC(rq);
 
-		atomic_inc(&cic->ioc->refcount);
+		atomic_long_inc(&cic->ioc->refcount);
 		cfqd->active_cic = cic;
 	}
 }
...
@@ -64,7 +64,7 @@ struct cfq_io_context {
  * and kmalloc'ed. These could be shared between processes.
  */
 struct io_context {
-	atomic_t refcount;
+	atomic_long_t refcount;
 	atomic_t nr_tasks;
 
 	/* all the fields below are protected by this lock */
@@ -91,8 +91,8 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 	 * if ref count is zero, don't allow sharing (ioc is going away, it's
 	 * a race).
 	 */
-	if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
-		atomic_inc(&ioc->nr_tasks);
+	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
+		atomic_long_inc(&ioc->refcount);
 		return ioc;
 	}
...
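
For readers less familiar with the pattern being widened, below is a minimal
user-space sketch of the inc-not-zero / dec-and-test refcounting that the
hunks above convert, written with C11 atomics rather than the kernel's
atomic_long_t helpers; the ioc_get()/ioc_put() names and the struct are
illustrative only, not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ioc_sketch {
	atomic_long refcount;	/* wide counter, as after this patch */
};

/* Counterpart of atomic_long_inc_not_zero(): take a reference unless the
 * context is already on its way out (refcount has reached zero). */
static bool ioc_get(struct ioc_sketch *ioc)
{
	long old = atomic_load(&ioc->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&ioc->refcount, &old, old + 1))
			return true;
	}
	return false;
}

/* Counterpart of atomic_long_dec_and_test(): drop a reference and report
 * whether it was the last one, i.e. whether the context can be freed. */
static bool ioc_put(struct ioc_sketch *ioc)
{
	return atomic_fetch_sub(&ioc->refcount, 1) == 1;
}

int main(void)
{
	struct ioc_sketch ioc = { .refcount = 1 };	/* owner's reference */

	if (ioc_get(&ioc))	/* e.g. a CLONE_IO sharer or a cfq_data */
		ioc_put(&ioc);
	if (ioc_put(&ioc))	/* owner drops the last reference */
		printf("last put: context would be freed here\n");
	return 0;
}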