Commit 2dd9c257 authored by Joe Thornber, committed by Alasdair G Kergon

dm thin: support read only external snapshot origins

Support the use of an external _read only_ device as an origin for a thin
device.

Any read to an unprovisioned area of the thin device will be passed
through to the origin.  Writes trigger allocation of new blocks as
usual.

One possible use case for this would be VM hosts that want to run
guests on thinly-provisioned volumes but have the base image on another
device (possibly shared between many VMs).
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent c4a69ecd
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -169,6 +169,38 @@ ii) Using an internal snapshot.
 
     dmsetup create snap --table "0 2097152 thin /dev/mapper/pool 1"
 
+External snapshots
+------------------
+
+You can use an external _read only_ device as an origin for a
+thinly-provisioned volume.  Any read to an unprovisioned area of the
+thin device will be passed through to the origin.  Writes trigger
+the allocation of new blocks as usual.
+
+One use case for this is VM hosts that want to run guests on
+thinly-provisioned volumes but have the base image on another device
+(possibly shared between many VMs).
+
+You must not write to the origin device if you use this technique!
+Of course, you may write to the thin device and take internal snapshots
+of the thin volume.
+
+i) Creating a snapshot of an external device
+
+  This is the same as creating a thin device.
+  You don't mention the origin at this stage.
+
+    dmsetup message /dev/mapper/pool 0 "create_thin 0"
+
+ii) Using a snapshot of an external device.
+
+  Append an extra parameter to the thin target specifying the origin:
+
+    dmsetup create snap --table "0 2097152 thin /dev/mapper/pool 0 /dev/image"
+
+  N.B. All descendants (internal snapshots) of this snapshot require the
+  same extra origin parameter.
+
 Deactivation
 ------------
 
@@ -254,7 +286,7 @@ iii) Messages
 
 i) Constructor
 
-    thin <pool dev> <dev id>
+    thin <pool dev> <dev id> [<external origin dev>]
 
     pool dev:
        the thin-pool device, e.g. /dev/mapper/my_pool or 253:0
@@ -263,6 +295,11 @@ i) Constructor
        the internal device identifier of the device to be
        activated.
 
+    external origin dev:
+       an optional block device outside the pool to be treated as a
+       read-only snapshot origin: reads to unprovisioned areas of the
+       thin target will be mapped to this device.
+
 The pool doesn't store any size against the thin devices.  If you
 load a thin target that is smaller than you've been using previously,
 then you'll have no access to blocks mapped beyond the end.  If you
...
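Taken together, the documented steps above form a short workflow. A minimal
sketch, assuming a pool already active at /dev/mapper/pool with
2097152-sector thin volumes as in the examples (the internal snapshot id 1
is illustrative):

    # Create a thin device (id 0); the external origin is not mentioned yet.
    dmsetup message /dev/mapper/pool 0 "create_thin 0"

    # Activate it with the read-only origin appended to the thin target line.
    # Reads of unprovisioned blocks pass through to /dev/image; writes
    # allocate new blocks from the pool.
    dmsetup create snap --table "0 2097152 thin /dev/mapper/pool 0 /dev/image"

    # An internal snapshot (id 1) of this device needs the same extra origin
    # parameter when it is activated (see the N.B. above).
    dmsetup suspend /dev/mapper/snap
    dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
    dmsetup resume /dev/mapper/snap
    dmsetup create snap2 --table "0 2097152 thin /dev/mapper/pool 1 /dev/image"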
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -549,6 +549,7 @@ struct pool_c {
  */
 struct thin_c {
        struct dm_dev *pool_dev;
+       struct dm_dev *origin_dev;
        dm_thin_id dev_id;
 
        struct pool *pool;
@@ -666,14 +667,16 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
                (bio->bi_sector & pool->offset_mask);
 }
 
-static void remap_and_issue(struct thin_c *tc, struct bio *bio,
-                           dm_block_t block)
+static void remap_to_origin(struct thin_c *tc, struct bio *bio)
+{
+       bio->bi_bdev = tc->origin_dev->bdev;
+}
+
+static void issue(struct thin_c *tc, struct bio *bio)
 {
        struct pool *pool = tc->pool;
        unsigned long flags;
 
-       remap(tc, bio, block);
-
        /*
         * Batch together any FUA/FLUSH bios we find and then issue
         * a single commit for them in process_deferred_bios().
@@ -686,6 +689,19 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
        generic_make_request(bio);
 }
 
+static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
+{
+       remap_to_origin(tc, bio);
+       issue(tc, bio);
+}
+
+static void remap_and_issue(struct thin_c *tc, struct bio *bio,
+                           dm_block_t block)
+{
+       remap(tc, bio, block);
+       issue(tc, bio);
+}
+
 /*
  * wake_worker() is used when new work is queued and when pool_resume is
  * ready to continue deferred IO processing.
@@ -932,7 +948,8 @@ static struct new_mapping *get_next_mapping(struct pool *pool)
 }
 
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
-                         dm_block_t data_origin, dm_block_t data_dest,
+                         struct dm_dev *origin, dm_block_t data_origin,
+                         dm_block_t data_dest,
                          struct cell *cell, struct bio *bio)
 {
        int r;
@@ -964,7 +981,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
        } else {
                struct dm_io_region from, to;
 
-               from.bdev = tc->pool_dev->bdev;
+               from.bdev = origin->bdev;
                from.sector = data_origin * pool->sectors_per_block;
                from.count = pool->sectors_per_block;
 
@@ -982,6 +999,22 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
        }
 }
 
+static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
+                                  dm_block_t data_origin, dm_block_t data_dest,
+                                  struct cell *cell, struct bio *bio)
+{
+       schedule_copy(tc, virt_block, tc->pool_dev,
+                     data_origin, data_dest, cell, bio);
+}
+
+static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
+                                  dm_block_t data_dest,
+                                  struct cell *cell, struct bio *bio)
+{
+       schedule_copy(tc, virt_block, tc->origin_dev,
+                     virt_block, data_dest, cell, bio);
+}
+
 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
                          dm_block_t data_block, struct cell *cell,
                          struct bio *bio)
@@ -1128,8 +1161,8 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
        r = alloc_data_block(tc, &data_block);
        switch (r) {
        case 0:
-               schedule_copy(tc, block, lookup_result->block,
-                             data_block, cell, bio);
+               schedule_internal_copy(tc, block, lookup_result->block,
+                                      data_block, cell, bio);
                break;
 
        case -ENOSPC:
@@ -1203,7 +1236,10 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
        r = alloc_data_block(tc, &data_block);
        switch (r) {
        case 0:
-               schedule_zero(tc, block, data_block, cell, bio);
+               if (tc->origin_dev)
+                       schedule_external_copy(tc, block, data_block, cell, bio);
+               else
+                       schedule_zero(tc, block, data_block, cell, bio);
                break;
 
        case -ENOSPC:
@@ -1254,7 +1290,11 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
                break;
 
        case -ENODATA:
-               provision_block(tc, bio, block, cell);
+               if (bio_data_dir(bio) == READ && tc->origin_dev) {
+                       cell_release_singleton(cell, bio);
+                       remap_to_origin_and_issue(tc, bio);
+               } else
+                       provision_block(tc, bio, block, cell);
                break;
 
        default:
@@ -2237,6 +2277,8 @@ static void thin_dtr(struct dm_target *ti)
        __pool_dec(tc->pool);
        dm_pool_close_thin_device(tc->td);
        dm_put_device(ti, tc->pool_dev);
+       if (tc->origin_dev)
+               dm_put_device(ti, tc->origin_dev);
        kfree(tc);
 
        mutex_unlock(&dm_thin_pool_table.mutex);
@@ -2245,21 +2287,22 @@ static void thin_dtr(struct dm_target *ti)
 /*
  * Thin target parameters:
  *
- * <pool_dev> <dev_id>
+ * <pool_dev> <dev_id> [origin_dev]
  *
  * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
  * dev_id: the internal device identifier
+ * origin_dev: a device external to the pool that should act as the origin
  */
 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
        int r;
        struct thin_c *tc;
-       struct dm_dev *pool_dev;
+       struct dm_dev *pool_dev, *origin_dev;
        struct mapped_device *pool_md;
 
        mutex_lock(&dm_thin_pool_table.mutex);
 
-       if (argc != 2) {
+       if (argc != 2 && argc != 3) {
                ti->error = "Invalid argument count";
                r = -EINVAL;
                goto out_unlock;
@@ -2272,6 +2315,15 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
                goto out_unlock;
        }
 
+       if (argc == 3) {
+               r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
+               if (r) {
+                       ti->error = "Error opening origin device";
+                       goto bad_origin_dev;
+               }
+               tc->origin_dev = origin_dev;
+       }
+
        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
        if (r) {
                ti->error = "Error opening pool device";
@@ -2324,6 +2376,9 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 bad_common:
        dm_put_device(ti, tc->pool_dev);
 bad_pool_dev:
+       if (tc->origin_dev)
+               dm_put_device(ti, tc->origin_dev);
+bad_origin_dev:
        kfree(tc);
 out_unlock:
        mutex_unlock(&dm_thin_pool_table.mutex);
@@ -2382,6 +2437,8 @@ static int thin_status(struct dm_target *ti, status_type_t type,
                        DMEMIT("%s %lu",
                               format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
                               (unsigned long) tc->dev_id);
+                       if (tc->origin_dev)
+                               DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
                        break;
                }
        }
@@ -2419,7 +2476,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 0, 0},
+       .version = {1, 1, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
...
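With the constructor and status changes above, the optional origin survives a
round trip through the device-mapper table: thin_status emits it as a third
field after the pool device and device id. A hypothetical check (the 253:0
and 254:3 device numbers are made up for illustration):

    dmsetup table snap
    0 2097152 thin 253:0 0 254:3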