Commit d55b5fda authored by Zach Brown, committed by Linus Torvalds

[PATCH] aio: remove aio_max_nr accounting race

AIO was adding a new context's max requests to the global total before
testing if that resulting total was over the global limit.  This let
innocent tasks get their new limit tested along with a racing guilty task
that was crossing the limit.  This serializes the _nr accounting with a
spinlock.  It also switches to using unsigned long for the global totals.
Individual contexts are still limited to an unsigned int's worth of
requests by the syscall interface.
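
To make the race concrete, the two accounting patterns look roughly like
this (an illustrative sketch lifted from the patch below, stripped of the
surrounding kernel context):

    /* Before: the addition is globally visible before the limit test,
     * so a concurrent task sees another task's in-flight reservation
     * and can fail even though its own request is tiny. */
    atomic_add(ctx->max_reqs, &aio_nr);
    if (unlikely(atomic_read(&aio_nr) > aio_max_nr))
            goto out_cleanup;

    /* After: the test and the update happen as a single step under
     * aio_nr_lock; the second comparison rejects unsigned wraparound. */
    spin_lock(&aio_nr_lock);
    if (aio_nr + ctx->max_reqs > aio_max_nr ||
        aio_nr + ctx->max_reqs < aio_nr)
            ctx->max_reqs = 0;              /* over the limit: account nothing */
    else
            aio_nr += ctx->max_reqs;        /* undone by __put_ioctx */
    spin_unlock(&aio_nr_lock);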

The problem and fix were verified with a simple program that spun creating
and destroying a context while holding on to another long lived context.
Before the patch, a task creating a tiny context could get a spurious EAGAIN
if it raced with a task creating a very large context that overran the
limit.
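
The reproducer itself is not part of the commit; a minimal reconstruction
along the lines described above might look like this (hypothetical code;
raw syscalls are used to avoid a libaio dependency, and 0x10000 is the
default aio_max_nr):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/aio_abi.h>

    static long sys_io_setup(unsigned nr_events, aio_context_t *ctxp)
    {
            return syscall(__NR_io_setup, nr_events, ctxp);
    }

    static long sys_io_destroy(aio_context_t ctx)
    {
            return syscall(__NR_io_destroy, ctx);
    }

    int main(void)
    {
            aio_context_t long_lived = 0;

            /* the long-lived context held across the whole run */
            if (sys_io_setup(128, &long_lived) < 0) {
                    perror("io_setup");
                    return 1;
            }

            if (fork() == 0) {
                    /* "guilty" task: asks for the whole limit; with the
                     * long-lived context held this must overrun and fail */
                    for (;;) {
                            aio_context_t big = 0;
                            if (sys_io_setup(0x10000, &big) == 0)
                                    sys_io_destroy(big);
                    }
            }

            /* "innocent" task: a tiny context should always succeed, but
             * before the patch it could observe the child's in-flight
             * addition to aio_nr and get a spurious EAGAIN */
            for (;;) {
                    aio_context_t tiny = 0;
                    if (sys_io_setup(1, &tiny) < 0) {
                            fprintf(stderr, "spurious failure: %s\n",
                                    strerror(errno));
                            return 1;
                    }
                    sys_io_destroy(tiny);
            }
    }
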
Signed-off-by: Zach Brown <zach.brown@oracle.com>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 0f6ed7c2
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -42,8 +42,9 @@
 #endif
 
 /*------ sysctl variables----*/
-atomic_t aio_nr = ATOMIC_INIT(0);	/* current system wide number of aio requests */
-unsigned aio_max_nr = 0x10000;	/* system wide maximum number of aio requests */
+static DEFINE_SPINLOCK(aio_nr_lock);
+unsigned long aio_nr;		/* current system wide number of aio requests */
+unsigned long aio_max_nr = 0x10000;	/* system wide maximum number of aio requests */
 /*----end sysctl variables---*/
 
 static kmem_cache_t	*kiocb_cachep;
@@ -208,7 +209,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (nr_events > aio_max_nr)
+	if ((unsigned long)nr_events > aio_max_nr)
 		return ERR_PTR(-EAGAIN);
 
 	ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
@@ -233,8 +234,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		goto out_freectx;
 
 	/* limit the number of system wide aios */
-	atomic_add(ctx->max_reqs, &aio_nr);	/* undone by __put_ioctx */
-	if (unlikely(atomic_read(&aio_nr) > aio_max_nr))
+	spin_lock(&aio_nr_lock);
+	if (aio_nr + ctx->max_reqs > aio_max_nr ||
+	    aio_nr + ctx->max_reqs < aio_nr)
+		ctx->max_reqs = 0;
+	else
+		aio_nr += ctx->max_reqs;
+	spin_unlock(&aio_nr_lock);
+	if (ctx->max_reqs == 0)
 		goto out_cleanup;
 
 	/* now link into global list.  kludge.  FIXME */
@@ -248,8 +255,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	return ctx;
 
 out_cleanup:
-	atomic_sub(ctx->max_reqs, &aio_nr);
-	ctx->max_reqs = 0;	/* prevent __put_ioctx from sub'ing aio_nr */
 	__put_ioctx(ctx);
 	return ERR_PTR(-EAGAIN);
 
@@ -374,7 +379,12 @@ void fastcall __put_ioctx(struct kioctx *ctx)
 	pr_debug("__put_ioctx: freeing %p\n", ctx);
 	kmem_cache_free(kioctx_cachep, ctx);
 
-	atomic_sub(nr_events, &aio_nr);
+	if (nr_events) {
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - nr_events > aio_nr);
+		aio_nr -= nr_events;
+		spin_unlock(&aio_nr_lock);
+	}
 }
 
 /* aio_get_req
@@ -1258,8 +1268,9 @@ asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
 		goto out;
 
 	ret = -EINVAL;
-	if (unlikely(ctx || (int)nr_events <= 0)) {
-		pr_debug("EINVAL: io_setup: ctx or nr_events > max\n");
+	if (unlikely(ctx || nr_events == 0)) {
+		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
+		         ctx, nr_events);
 		goto out;
 	}
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -183,6 +183,7 @@ struct kioctx {
 	struct list_head	active_reqs;	/* used for cancellation */
 	struct list_head	run_list;	/* used for kicked reqs */
 
+	/* sys_io_setup currently limits this to an unsigned int */
 	unsigned		max_reqs;
 
 	struct aio_ring_info	ring_info;
@@ -234,7 +235,7 @@ static inline struct kiocb *list_kiocb(struct list_head *h)
 }
 
 /* for sysctl: */
-extern atomic_t aio_nr;
-extern unsigned aio_max_nr;
+extern unsigned long aio_nr;
+extern unsigned long aio_max_nr;
 
 #endif /* __LINUX__AIO_H */
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -952,7 +952,7 @@ static ctl_table fs_table[] = {
 		.data		= &aio_nr,
 		.maxlen		= sizeof(aio_nr),
 		.mode		= 0444,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= &proc_doulongvec_minmax,
 	},
 	{
 		.ctl_name	= FS_AIO_MAX_NR,
@@ -960,7 +960,7 @@ static ctl_table fs_table[] = {
 		.data		= &aio_max_nr,
 		.maxlen		= sizeof(aio_max_nr),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= &proc_doulongvec_minmax,
 	},
 #ifdef CONFIG_INOTIFY
 	{