Commit a8472b4b authored by Linus Torvalds

Merge git://git.kvack.org/~bcrl/aio-next

Pull AIO leak fixes from Ben LaHaise:
 "I've put these two patches plus Linus's change through a round of
  tests, and it passes millions of iterations of the aio numa
  migratepage test, as well as a number of repetitions of a few simple
  read and write tests.

  The first patch fixes the memory leak Kent introduced, while the
  second patch makes aio_migratepage() much more paranoid and robust"

* git://git.kvack.org/~bcrl/aio-next:
  aio/migratepages: make aio migrate pages sane
  aio: fix kioctx leak introduced by "aio: Fix a trinity splat"

Parents: 3dc9acb6 8e321fef
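
Editor's note: the "paranoid and robust" part of the second patch is a check/re-check discipline around ctx->ring_pages[]: validate the slot before the expensive migration work, then re-validate under ctx->completion_lock before committing the swap. As a rough, runnable userspace analogue of that discipline (pthreads stand in for the kernel locks; migrate_slot, ring_slot, and the mutex names are illustrative only, not kernel API):

    /* Model of the revalidate-then-commit pattern in aio_migratepage(). */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *ring_slot;                 /* models ctx->ring_pages[idx] */

    static int migrate_slot(void *old, void *new)
    {
            int rc = 0;

            /* Pre-check: bail out early if the slot already changed. */
            pthread_mutex_lock(&completion_lock);
            if (ring_slot != old)
                    rc = -EAGAIN;
            pthread_mutex_unlock(&completion_lock);
            if (rc)
                    return rc;

            /* ... expensive copy/move work happens here, unlocked ... */

            /* Re-check under the lock; only then commit the swap. */
            pthread_mutex_lock(&completion_lock);
            if (ring_slot == old)
                    ring_slot = new;
            else
                    rc = -EAGAIN;
            pthread_mutex_unlock(&completion_lock);
            return rc;
    }

    int main(void)
    {
            int a, b;
            ring_slot = &a;
            printf("first:  %d\n", migrate_slot(&a, &b)); /* 0: swapped */
            printf("second: %d\n", migrate_slot(&a, &b)); /* -EAGAIN: stale */
            return 0;
    }

The double check is what makes a racing aio_free_ring() (which now NULLs the slot before dropping its reference) safe: a migration that loses the race simply returns -EAGAIN instead of touching a freed ring.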
fs/aio.c
@@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx)
 	int i;
 
 	for (i = 0; i < ctx->nr_pages; i++) {
+		struct page *page;
 		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
 		       page_count(ctx->ring_pages[i]));
-		put_page(ctx->ring_pages[i]);
+		page = ctx->ring_pages[i];
+		if (!page)
+			continue;
+		ctx->ring_pages[i] = NULL;
+		put_page(page);
 	}
 
 	put_aio_ring_file(ctx);
@@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 	unsigned long flags;
 	int rc;
 
+	rc = 0;
+
+	/* Make sure the old page hasn't already been changed */
+	spin_lock(&mapping->private_lock);
+	ctx = mapping->private_data;
+	if (ctx) {
+		pgoff_t idx;
+		spin_lock_irqsave(&ctx->completion_lock, flags);
+		idx = old->index;
+		if (idx < (pgoff_t)ctx->nr_pages) {
+			if (ctx->ring_pages[idx] != old)
+				rc = -EAGAIN;
+		} else
+			rc = -EINVAL;
+		spin_unlock_irqrestore(&ctx->completion_lock, flags);
+	} else
+		rc = -EINVAL;
+	spin_unlock(&mapping->private_lock);
+
+	if (rc != 0)
+		return rc;
+
 	/* Writeback must be complete */
 	BUG_ON(PageWriteback(old));
-	put_page(old);
+	get_page(new);
 
-	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
+	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
 	if (rc != MIGRATEPAGE_SUCCESS) {
-		get_page(old);
+		put_page(new);
 		return rc;
 	}
 
-	get_page(new);
-
 	/* We can potentially race against kioctx teardown here.  Use the
 	 * address_space's private data lock to protect the mapping's
 	 * private_data.
@@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 		spin_lock_irqsave(&ctx->completion_lock, flags);
 		migrate_page_copy(new, old);
 		idx = old->index;
-		if (idx < (pgoff_t)ctx->nr_pages)
-			ctx->ring_pages[idx] = new;
+		if (idx < (pgoff_t)ctx->nr_pages) {
+			/* And only do the move if things haven't changed */
+			if (ctx->ring_pages[idx] == old)
+				ctx->ring_pages[idx] = new;
+			else
+				rc = -EAGAIN;
+		} else
+			rc = -EINVAL;
 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
 	} else
 		rc = -EBUSY;
 	spin_unlock(&mapping->private_lock);
 
+	if (rc == MIGRATEPAGE_SUCCESS)
+		put_page(old);
+	else
+		put_page(new);
+
 	return rc;
 }
 #endif
@@ -640,7 +676,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	aio_nr += ctx->max_reqs;
 	spin_unlock(&aio_nr_lock);
 
 	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
+	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */
 
 	err = ioctx_add_table(ctx, mm);
 	if (err)
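
Editor's note: taken together, the fs/aio.c hunks above change who owns which page reference during migration: a reference on the new page is taken before the mapping move, and exactly one reference is dropped at the end (the old page's on success, the new page's on any failure, including a failed re-check). A toy sketch of that flow with a plain int standing in for the kernel's page refcount (page_model, get_page_m, put_page_m, and move_ok are illustrative stand-ins, not kernel API; the -EAGAIN re-check path is folded into the move_ok flag):

    #include <stdio.h>

    struct page_model { int refs; };

    static void get_page_m(struct page_model *p) { p->refs++; }
    static void put_page_m(struct page_model *p) { p->refs--; }

    /* move_ok models migrate_page_move_mapping() and the later
     * re-check of ctx->ring_pages[idx] both succeeding. */
    static int migrate_ring_page(struct page_model *old,
                                 struct page_model *new, int move_ok)
    {
            get_page_m(new);          /* ring's reference, taken up front */
            if (!move_ok) {
                    put_page_m(new);  /* failure: the ring keeps old */
                    return -1;
            }
            /* ctx->ring_pages[idx] = new happens here, under the lock */
            put_page_m(old);          /* success: ring no longer holds old */
            return 0;
    }

    int main(void)
    {
            struct page_model old = { .refs = 2 };  /* ring + page cache */
            struct page_model new = { .refs = 1 };  /* allocator's ref */

            migrate_ring_page(&old, &new, 1);
            printf("old=%d new=%d\n", old.refs, new.refs); /* old=1 new=2 */
            return 0;
    }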
include/linux/migrate.h
@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 			struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page,
-		struct buffer_head *head, enum migrate_mode mode);
+		struct buffer_head *head, enum migrate_mode mode,
+		int extra_count);
 #else
 
 static inline void putback_lru_pages(struct list_head *l) {}
mm/migrate.c
@@ -317,14 +317,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
  */
 int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page,
-		struct buffer_head *head, enum migrate_mode mode)
+		struct buffer_head *head, enum migrate_mode mode,
+		int extra_count)
 {
-	int expected_count = 0;
+	int expected_count = 1 + extra_count;
 	void **pslot;
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
-		if (page_count(page) != 1)
+		if (page_count(page) != expected_count)
 			return -EAGAIN;
 		return MIGRATEPAGE_SUCCESS;
 	}
@@ -334,7 +335,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
 					page_index(page));
 
-	expected_count = 2 + page_has_private(page);
+	expected_count += 1 + page_has_private(page);
 	if (page_count(page) != expected_count ||
 		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
@@ -584,7 +585,7 @@ int migrate_page(struct address_space *mapping,
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
+	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
 
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
@@ -611,7 +612,7 @@ int buffer_migrate_page(struct address_space *mapping,
 	head = page_buffers(page);
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
+	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
 
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
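
Editor's note: the mm/migrate.c change generalizes the refcount sanity check. A caller that holds extra known references to the page (aio holds one per ring page via ctx->ring_pages[]) declares them through extra_count, so migrate_page_move_mapping() can still verify that nobody else has the page pinned. A small standalone model of the arithmetic (expected_refs is a made-up helper mirroring the kernel logic, not a kernel function):

    #include <stdio.h>

    static int expected_refs(int has_mapping, int has_private, int extra_count)
    {
            /* one ref owned by the migration caller, plus any refs the
             * caller has declared via extra_count */
            int expected = 1 + extra_count;

            /* file-backed pages also carry the mapping's (radix tree) ref
             * and possibly one for private data such as buffer heads */
            if (has_mapping)
                    expected += 1 + has_private;
            return expected;
    }

    int main(void)
    {
            /* aio ring page: file-backed, no buffer heads, and aio holds
             * one ref in ctx->ring_pages[], so it passes extra_count = 1 */
            printf("aio ring page:   %d\n", expected_refs(1, 0, 1)); /* 3 */

            /* migrate_page()/buffer_migrate_page() pass extra_count = 0,
             * preserving the old "2 + page_has_private(page)" behaviour */
            printf("buffered page:   %d\n", expected_refs(1, 1, 0)); /* 3 */
            printf("anonymous page:  %d\n", expected_refs(0, 0, 0)); /* 1 */
            return 0;
    }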