Commit f0b117b0 authored by Benjamin LaHaise

several minor bugfixes for the aio core

parent 524b6ab3
fs/aio.c

@@ -232,10 +232,12 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	if (aio_setup_ring(ctx) < 0)
 		goto out_freectx;
 
-	/* now link into global list. kludge. FIXME */
+	/* limit the number of system wide aios */
 	atomic_add(ctx->max_reqs, &aio_nr);	/* undone by __put_ioctx */
 	if (unlikely(atomic_read(&aio_nr) > aio_max_nr))
 		goto out_cleanup;
+
+	/* now link into global list. kludge. FIXME */
 	write_lock(&mm->ioctx_list_lock);
 	ctx->next = mm->ioctx_list;
 	mm->ioctx_list = ctx;
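Note on the hunk above: the old comment mislabeled the aio_nr accounting as the global-list linkage; the fix moves the "kludge" comment down to the list splice it actually describes, and documents the atomic_add()/aio_max_nr pair as the system-wide limit check. That pair follows a lock-free "charge first, test the cap, undo on failure" pattern. A minimal userspace sketch of the same pattern, with hypothetical names (charge_reqs, total_reqs, max_reqs are illustrative, not kernel identifiers):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_long total_reqs;        /* plays the role of aio_nr */
static const long  max_reqs = 65536;  /* plays the role of aio_max_nr */

/* Charge optimistically, then test the cap: racing callers can
 * momentarily overshoot, but any caller that sees the overrun backs
 * its charge out again, so the cap holds without a lock. */
static bool charge_reqs(long nr)
{
	atomic_fetch_add(&total_reqs, nr);
	if (atomic_load(&total_reqs) > max_reqs) {
		atomic_fetch_sub(&total_reqs, nr);  /* the undo __put_ioctx does */
		return false;
	}
	return true;
}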
@@ -377,28 +379,37 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx)
 {
 	struct kiocb *req = NULL;
 	struct aio_ring *ring;
+	int okay = 0;
 
 	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
 	if (unlikely(!req))
 		return NULL;
 
+	req->ki_users = 1;
+	req->ki_key = 0;
+	req->ki_ctx = ctx;
+	req->ki_cancel = NULL;
+	req->ki_user_obj = NULL;
+
 	/* Check if the completion queue has enough free space to
 	 * accept an event from this io.
 	 */
 	spin_lock_irq(&ctx->ctx_lock);
 	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
-	if (likely(ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring))) {
+	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
 		list_add(&req->ki_list, &ctx->active_reqs);
 		get_ioctx(ctx);
 		ctx->reqs_active++;
-		req->ki_user_obj = NULL;
-		req->ki_ctx = ctx;
-		req->ki_users = 1;
-	} else
-		kmem_cache_free(kiocb_cachep, req);
+		okay = 1;
+	}
 	kunmap_atomic(ring, KM_USER0);
 	spin_unlock_irq(&ctx->ctx_lock);
 
+	if (!okay) {
+		kmem_cache_free(kiocb_cachep, req);
+		req = NULL;
+	}
+
 	return req;
 }
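Two things change in __aio_get_req() above: the kiocb fields (including ki_key, which was previously never set) are now initialized unconditionally before the lock is taken, and the failure-path kmem_cache_free() moves out from under ctx_lock and the kmap_atomic() section. The shape of the fix is "decide under the lock, clean up after it". A userspace sketch of that shape, with hypothetical names (reserve_slot and struct queue are illustrative, not kernel API):

#include <pthread.h>
#include <stdlib.h>
#include <stdbool.h>

struct queue {
	pthread_mutex_t lock;
	int active;    /* requests in flight */
	int capacity;  /* free completion-queue slots */
};

void *reserve_slot(struct queue *q)
{
	bool okay = false;
	void *req = malloc(64);  /* stand-in for kmem_cache_alloc */
	if (!req)
		return NULL;

	pthread_mutex_lock(&q->lock);
	if (q->active < q->capacity) {  /* room for a completion event? */
		q->active++;
		okay = true;
	}
	pthread_mutex_unlock(&q->lock);

	if (!okay) {  /* cleanup runs only after the lock is dropped */
		free(req);
		req = NULL;
	}
	return req;
}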
@@ -540,7 +551,7 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 	 * case the usage count checks will have to move under ctx_lock
 	 * for all cases.
 	 */
-	if (ctx == &ctx->mm->default_kioctx) {
+	if (is_sync_kiocb(iocb)) {
 		int ret;
 
 		iocb->ki_user_data = res;
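This branch of aio_complete() handles a synchronous kiocb: there is no user-visible ring to post an event to, so the result is stored on the iocb and the sleeping submitter is woken. A rough userspace sketch of that shape, with pthreads standing in for the kernel's wait machinery (all names here are illustrative):

#include <pthread.h>

/* Reduced synchronous iocb: a result slot plus a way to wake the waiter. */
struct sync_iocb {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	long            result;
	int             complete;
};

/* Completion side: store the result (like iocb->ki_user_data = res)
 * and wake the submitter; no ring event is generated. */
void complete_sync(struct sync_iocb *iocb, long res)
{
	pthread_mutex_lock(&iocb->lock);
	iocb->result = res;
	iocb->complete = 1;
	pthread_cond_signal(&iocb->done);
	pthread_mutex_unlock(&iocb->lock);
}

/* Submission side: sleep until the completion arrives. */
long wait_sync(struct sync_iocb *iocb)
{
	pthread_mutex_lock(&iocb->lock);
	while (!iocb->complete)
		pthread_cond_wait(&iocb->done, &iocb->lock);
	pthread_mutex_unlock(&iocb->lock);
	return iocb->result;
}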
@@ -979,7 +990,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb *user_iocb,
 		ret = -EINVAL;
 	}
 
-	if (likely(EIOCBQUEUED == ret))
+	if (likely(-EIOCBQUEUED == ret))
 		return 0;
 	if (ret >= 0) {
 		aio_complete(req, ret, 0);
...
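The one-character fix above matters: submission methods signal "queued, will complete later" by returning the negative errno-style code -EIOCBQUEUED. Comparing ret against the positive constant could never match, so every queued iocb fell through to the ret >= 0 branch and was completed prematurely. A compilable illustration of the sign convention (the EIOCBQUEUED value is assumed from the kernel's internal errno.h; everything else is hypothetical):

#include <stdio.h>

#define EIOCBQUEUED 529  /* kernel-internal errno; always returned negated */

/* Toy submission function following the kernel convention:
 * negative errno-style codes for errors/status, >= 0 for an
 * immediately available result. */
static long submit(int queue_it)
{
	return queue_it ? -EIOCBQUEUED : 42;
}

int main(void)
{
	long ret = submit(1);
	if (-EIOCBQUEUED == ret)  /* note the minus sign */
		printf("queued; completion arrives later\n");
	else if (ret >= 0)
		printf("done synchronously: %ld\n", ret);
	else
		printf("error: %ld\n", ret);
	return 0;
}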
include/linux/aio.h

@@ -42,6 +42,7 @@ struct kiocb {
 	long			private[KIOCB_PRIVATE_SIZE/sizeof(long)];
 };
 
+#define is_sync_kiocb(iocb)	((iocb)->ki_key == KIOCB_SYNC_KEY)
 #define init_sync_kiocb(x, filp)			\
 	do {						\
 		struct task_struct *tsk = current;	\
...
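The new is_sync_kiocb() macro is what the aio_complete() change above relies on: synchronous kiocbs are tagged through their ki_key (set to KIOCB_SYNC_KEY by init_sync_kiocb()), and __aio_get_req() now explicitly sets ki_key = 0 for async requests, so the test is unambiguous. A self-contained sketch of the tagging (the KIOCB_SYNC_KEY value and the reduced struct are assumptions for illustration):

#include <assert.h>

#define KIOCB_SYNC_KEY	(~0U)	/* assumed value; see aio.h */

/* Reduced kiocb: only the field the macro inspects. */
struct kiocb_sketch {
	unsigned ki_key;
};

#define is_sync_kiocb(iocb)	((iocb)->ki_key == KIOCB_SYNC_KEY)

int main(void)
{
	struct kiocb_sketch sync_req  = { .ki_key = KIOCB_SYNC_KEY };
	struct kiocb_sketch async_req = { .ki_key = 0 };  /* as __aio_get_req() now sets */

	assert(is_sync_kiocb(&sync_req));
	assert(!is_sync_kiocb(&async_req));
	return 0;
}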