Commit 2903381f authored by Kent Overstreet

bcache: Take data offset from the bdev superblock.

Add a new superblock version, and consolidate related defines.
Signed-off-by: Gabriel de Perthuis <g2p.code+bcache@gmail.com>
Signed-off-by: Kent Overstreet <koverstreet@google.com>
parent cef52797
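
In short: backing-device superblocks now record their own data offset, and read_super() falls back to the historical fixed 16-sector offset for version-1 (BCACHE_SB_VERSION_BDEV) superblocks. Below is a minimal standalone sketch of that version handling, using only the defines and fields this patch adds; the helper name bdev_data_offset() is invented here for illustration and does not exist in the tree.

#include <stdint.h>

#define BCACHE_SB_VERSION_BDEV			1
#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET	4
#define BDEV_DATA_START_DEFAULT			16	/* sectors */

/* Hypothetical helper mirroring the read_super() switch added below:
 * old-format backing devices keep the fixed 16-sector start, new-format
 * ones take the offset stored in their superblock.  Returns 0 for cache
 * devices and unknown versions, which the caller must reject. */
static uint64_t bdev_data_offset(uint64_t version, uint64_t sb_data_offset)
{
	switch (version) {
	case BCACHE_SB_VERSION_BDEV:
		return BDEV_DATA_START_DEFAULT;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		/* read_super() additionally rejects offsets smaller than
		 * BDEV_DATA_START_DEFAULT as "Bad data offset" */
		return sb_data_offset;
	default:
		return 0;
	}
}
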
@@ -223,11 +223,17 @@ struct bkey {
 #define BKEY_PADDED(key)					\
 	union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; }
 
-/* Version 1: Backing device
+/* Version 0: Cache device
+ * Version 1: Backing device
  * Version 2: Seed pointer into btree node checksum
- * Version 3: New UUID format
+ * Version 3: Cache device with new UUID format
+ * Version 4: Backing device with data offset
  */
-#define BCACHE_SB_VERSION		3
+#define BCACHE_SB_VERSION_CDEV		0
+#define BCACHE_SB_VERSION_BDEV		1
+#define BCACHE_SB_VERSION_CDEV_WITH_UUID	3
+#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET	4
+#define BCACHE_SB_MAX_VERSION		4
 
 #define SB_SECTOR		8
 #define SB_SIZE			4096
@@ -236,13 +242,12 @@ struct bkey {
 /* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
 #define MAX_CACHES_PER_SET	8
 
-#define BDEV_DATA_START		16	/* sectors */
+#define BDEV_DATA_START_DEFAULT	16	/* sectors */
 
 struct cache_sb {
 	uint64_t		csum;
 	uint64_t		offset;	/* sector where this sb was written */
 	uint64_t		version;
-#define CACHE_BACKING_DEV	1
 
 	uint8_t			magic[16];
@@ -257,12 +262,28 @@ struct cache_sb {
 	uint64_t		seq;
 	uint64_t		pad[8];
 
-	uint64_t		nbuckets;	/* device size */
-	uint16_t		block_size;	/* sectors */
-	uint16_t		bucket_size;	/* sectors */
-
-	uint16_t		nr_in_set;
-	uint16_t		nr_this_dev;
+	union {
+	struct {
+		/* Cache devices */
+		uint64_t	nbuckets;	/* device size */
+
+		uint16_t	block_size;	/* sectors */
+		uint16_t	bucket_size;	/* sectors */
+
+		uint16_t	nr_in_set;
+		uint16_t	nr_this_dev;
+	};
+	struct {
+		/* Backing devices */
+		uint64_t	data_offset;
+
+		/*
+		 * block_size from the cache device section is still used by
+		 * backing devices, so don't add anything here until we fix
+		 * things to not need it for backing devices anymore
+		 */
+	};
+	};
 
 	uint32_t		last_mount;	/* time_t */
@@ -861,6 +882,12 @@ static inline bool key_merging_disabled(struct cache_set *c)
 #endif
 }
 
+static inline bool SB_IS_BDEV(const struct cache_sb *sb)
+{
+	return sb->version == BCACHE_SB_VERSION_BDEV
+		|| sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
+}
+
 struct bbio {
 	unsigned		submit_time_us;
 	union {
......
@@ -1220,7 +1220,7 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 	part_stat_unlock();
 
 	bio->bi_bdev	= dc->bdev;
-	bio->bi_sector	+= BDEV_DATA_START;
+	bio->bi_sector	+= dc->sb.data_offset;
 
 	if (cached_dev_get(dc)) {
 		s = search_alloc(bio, d);
......
@@ -110,15 +110,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
 	sb->flags		= le64_to_cpu(s->flags);
 	sb->seq			= le64_to_cpu(s->seq);
-
-	sb->nbuckets		= le64_to_cpu(s->nbuckets);
-	sb->block_size		= le16_to_cpu(s->block_size);
-	sb->bucket_size		= le16_to_cpu(s->bucket_size);
-
-	sb->nr_in_set		= le16_to_cpu(s->nr_in_set);
-	sb->nr_this_dev		= le16_to_cpu(s->nr_this_dev);
 	sb->last_mount		= le32_to_cpu(s->last_mount);
 	sb->first_bucket	= le16_to_cpu(s->first_bucket);
 	sb->keys		= le16_to_cpu(s->keys);
@@ -147,53 +139,77 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
 	if (bch_is_zero(sb->uuid, 16))
 		goto err;
 
-	err = "Unsupported superblock version";
-	if (sb->version > BCACHE_SB_VERSION)
-		goto err;
+	switch (sb->version) {
+	case BCACHE_SB_VERSION_BDEV:
+		sb->block_size	= le16_to_cpu(s->block_size);
+		sb->data_offset	= BDEV_DATA_START_DEFAULT;
+		break;
+	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
+		sb->block_size	= le16_to_cpu(s->block_size);
+		sb->data_offset	= le64_to_cpu(s->data_offset);
 
-	err = "Bad block/bucket size";
-	if (!is_power_of_2(sb->block_size) || sb->block_size > PAGE_SECTORS ||
-	    !is_power_of_2(sb->bucket_size) || sb->bucket_size < PAGE_SECTORS)
-		goto err;
+		err = "Bad data offset";
+		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
+			goto err;
 
-	err = "Too many buckets";
-	if (sb->nbuckets > LONG_MAX)
-		goto err;
+		break;
+	case BCACHE_SB_VERSION_CDEV:
+	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
+		sb->nbuckets	= le64_to_cpu(s->nbuckets);
+		sb->block_size	= le16_to_cpu(s->block_size);
+		sb->bucket_size	= le16_to_cpu(s->bucket_size);
 
-	err = "Not enough buckets";
-	if (sb->nbuckets < 1 << 7)
-		goto err;
+		sb->nr_in_set	= le16_to_cpu(s->nr_in_set);
+		sb->nr_this_dev	= le16_to_cpu(s->nr_this_dev);
 
-	err = "Invalid superblock: device too small";
-	if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
-		goto err;
+		err = "Too many buckets";
+		if (sb->nbuckets > LONG_MAX)
+			goto err;
 
-	if (sb->version == CACHE_BACKING_DEV)
-		goto out;
+		err = "Not enough buckets";
+		if (sb->nbuckets < 1 << 7)
+			goto err;
 
-	err = "Bad UUID";
-	if (bch_is_zero(sb->set_uuid, 16))
-		goto err;
+		err = "Bad block/bucket size";
+		if (!is_power_of_2(sb->block_size) ||
+		    sb->block_size > PAGE_SECTORS ||
+		    !is_power_of_2(sb->bucket_size) ||
+		    sb->bucket_size < PAGE_SECTORS)
+			goto err;
 
-	err = "Bad cache device number in set";
-	if (!sb->nr_in_set ||
-	    sb->nr_in_set <= sb->nr_this_dev ||
-	    sb->nr_in_set > MAX_CACHES_PER_SET)
-		goto err;
+		err = "Invalid superblock: device too small";
+		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
+			goto err;
 
-	err = "Journal buckets not sequential";
-	for (i = 0; i < sb->keys; i++)
-		if (sb->d[i] != sb->first_bucket + i)
-			goto err;
+		err = "Bad UUID";
+		if (bch_is_zero(sb->set_uuid, 16))
+			goto err;
 
-	err = "Too many journal buckets";
-	if (sb->first_bucket + sb->keys > sb->nbuckets)
-		goto err;
+		err = "Bad cache device number in set";
+		if (!sb->nr_in_set ||
+		    sb->nr_in_set <= sb->nr_this_dev ||
+		    sb->nr_in_set > MAX_CACHES_PER_SET)
+			goto err;
 
-	err = "Invalid superblock: first bucket comes before end of super";
-	if (sb->first_bucket * sb->bucket_size < 16)
-		goto err;
+		err = "Journal buckets not sequential";
+		for (i = 0; i < sb->keys; i++)
+			if (sb->d[i] != sb->first_bucket + i)
+				goto err;
 
-out:
+		err = "Too many journal buckets";
+		if (sb->first_bucket + sb->keys > sb->nbuckets)
+			goto err;
+
+		err = "Invalid superblock: first bucket comes before end of super";
+		if (sb->first_bucket * sb->bucket_size < 16)
+			goto err;
+		break;
+	default:
+		err = "Unsupported superblock version";
+		goto err;
+	}
+
 	sb->last_mount = get_seconds();
 	err = NULL;
@@ -286,7 +302,7 @@ void bcache_write_super(struct cache_set *c)
 	for_each_cache(ca, c, i) {
 		struct bio *bio = &ca->sb_bio;
 
-		ca->sb.version		= BCACHE_SB_VERSION;
+		ca->sb.version		= BCACHE_SB_VERSION_CDEV_WITH_UUID;
 		ca->sb.seq		= c->sb.seq;
 		ca->sb.last_mount	= c->sb.last_mount;
@@ -1049,7 +1065,7 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
 	g = dc->disk.disk;
 
-	set_capacity(g, dc->bdev->bd_part->nr_sects - 16);
+	set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
 
 	bch_cached_dev_request_init(dc);
@@ -1802,7 +1818,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 	if (err)
 		goto err_close;
 
-	if (sb->version == CACHE_BACKING_DEV) {
+	if (SB_IS_BDEV(sb)) {
 		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
 
 		err = register_bdev(sb, sb_page, bdev, dc);
......
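
One practical consequence, shown with an illustrative (made-up) example rather than anything taken from the patch itself: register_bdev() now sizes the exposed cached device as nr_sects - data_offset instead of subtracting a hard-coded 16 sectors, so a backing device formatted with a larger offset simply exposes correspondingly fewer sectors.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical values; 512-byte kernel sectors assumed */
	uint64_t nr_sects    = 1953525168;	/* backing partition size in sectors */
	uint64_t data_offset = 2048;		/* offset read from the bdev superblock */

	/* mirrors set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset) */
	printf("exposed capacity: %llu sectors\n",
	       (unsigned long long)(nr_sects - data_offset));
	return 0;
}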