Commit 13bd677a authored by Mikulas Patocka, committed by Mike Snitzer

dm cache: fix bugs when a GFP_NOWAIT allocation fails

A GFP_NOWAIT allocation can fail at any time - it doesn't wait for memory to
become available, and it fails if the mempool is exhausted and there is not
enough memory.

If we go down this path:
  map_bio -> mg_start -> alloc_migration -> mempool_alloc(GFP_NOWAIT)
we can see that map_bio() doesn't check the return value of mg_start(),
and the bio is leaked.

If we go down this path:
  map_bio -> mg_start -> mg_lock_writes -> alloc_prison_cell ->
  dm_bio_prison_alloc_cell_v2 -> mempool_alloc(GFP_NOWAIT) ->
  mg_lock_writes -> mg_complete
the bio is ended with an error - this is unacceptable because it could
cause filesystem corruption if the machine temporarily runs out of
memory.

Change GFP_NOWAIT to GFP_NOIO, so that the mempool code will properly
wait until memory becomes available. mempool_alloc with GFP_NOIO can't
fail, so remove the code paths that deal with allocation failure.
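
In outline, the alloc_migration() change looks like this (condensed from the
diff below; the other call sites follow the same pattern):

  /* before: GFP_NOWAIT may return NULL, so every caller had to handle
   * failure - and map_bio() didn't, leaking the bio */
  mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
  if (!mg)
          return NULL;

  /* after: GFP_NOIO waits (without recursing into I/O) until a mempool
   * element is available, so the allocation can't fail and the callers'
   * error paths can be deleted */
  mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
  memset(mg, 0, sizeof(*mg));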

Cc: stable@vger.kernel.org
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent b2155578
@@ -542,7 +542,7 @@ static void wake_migration_worker(struct cache *cache)
 
 static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
 {
-        return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
+        return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
 }
 
 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
@@ -554,9 +554,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
 {
         struct dm_cache_migration *mg;
 
-        mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
-        if (!mg)
-                return NULL;
+        mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
 
         memset(mg, 0, sizeof(*mg));
 
@@ -664,10 +662,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
         struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
 
         cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
-        if (!cell_prealloc) {
-                defer_bio(cache, bio);
-                return false;
-        }
 
         build_key(oblock, end, &key);
         r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
@@ -1493,11 +1487,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
         struct dm_bio_prison_cell_v2 *prealloc;
 
         prealloc = alloc_prison_cell(cache);
-        if (!prealloc) {
-                DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
-                mg_complete(mg, false);
-                return -ENOMEM;
-        }
 
         /*
          * Prevent writes to the block, but allow reads to continue.
@@ -1535,11 +1524,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
         }
 
         mg = alloc_migration(cache);
-        if (!mg) {
-                policy_complete_background_work(cache->policy, op, false);
-                background_work_end(cache);
-                return -ENOMEM;
-        }
 
         mg->op = op;
         mg->overwrite_bio = bio;
@@ -1628,10 +1612,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
         struct dm_bio_prison_cell_v2 *prealloc;
 
         prealloc = alloc_prison_cell(cache);
-        if (!prealloc) {
-                invalidate_complete(mg, false);
-                return -ENOMEM;
-        }
 
         build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
         r = dm_cell_lock_v2(cache->prison, &key,
@@ -1669,10 +1649,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
                 return -EPERM;
 
         mg = alloc_migration(cache);
-        if (!mg) {
-                background_work_end(cache);
-                return -ENOMEM;
-        }
 
         mg->overwrite_bio = bio;
         mg->invalidate_cblock = cblock;