Commit 94d7dbf1 authored by Linus Torvalds

Merge tag 'for-4.17/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - a stable fix for DM integrity to use kvfree

 - fix for a 4.17-rc1 change to dm-bufio's buffer alignment

 - fixes for a few sparse warnings

 - remove VLA usage in DM mirror target

 - improve DM thinp Documentation for the "read_only" feature

* tag 'for-4.17/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm thin: update Documentation to clarify when "read_only" is valid
  dm mirror: remove VLA usage
  dm: fix some sparse warnings and whitespace in dax methods
  dm cache background tracker: fix sparse warning
  dm bufio: fix buffer alignment
  dm integrity: use kvfree for kvmalloc'd memory
parents 008464a9 28700a36

Documentation/device-mapper/thin-provisioning.txt
@@ -264,7 +264,10 @@ i) Constructor
                          data device, but just remove the mapping.
 
     read_only: Don't allow any changes to be made to the pool
-               metadata.
+               metadata.  This mode is only available after the
+               thin-pool has been created and first used in full
+               read/write mode.  It cannot be specified on initial
+               thin-pool creation.
 
     error_if_no_space: Error IOs, instead of queueing, if no space.
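
For context on where the new wording applies: "read_only" is passed in the
thin-pool table's optional feature-argument list. A hypothetical example
(device names and sizes are invented here, not taken from the patch) that
switches an already-created pool to read-only by loading a replacement table:

    dmsetup load pool --table \
        "0 2097152 thin-pool /dev/vg/pool_meta /dev/vg/pool_data 128 32768 1 read_only"
    dmsetup resume pool

Per the clarified text, the same flag is not valid in the initial table that
creates the pool.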

drivers/md/dm-bufio.c
@@ -1681,8 +1681,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
        if (block_size <= KMALLOC_MAX_SIZE &&
            (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
-               snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", c->block_size);
-               c->slab_cache = kmem_cache_create(slab_name, c->block_size, ARCH_KMALLOC_MINALIGN,
+               unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
+               snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
+               c->slab_cache = kmem_cache_create(slab_name, block_size, align,
                                                  SLAB_RECLAIM_ACCOUNT, NULL);
                if (!c->slab_cache) {
                        r = -ENOMEM;
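
The new "align" value is the largest power of two that divides block_size,
capped at the page size, so slab buffers stay naturally aligned without
over-padding. A standalone sketch of that arithmetic (userspace C; gcc/clang's
__builtin_ctz stands in for the kernel's __ffs(), and a 4 KiB page size is
assumed):

    #include <stdio.h>

    #define ASSUMED_PAGE_SIZE 4096u         /* assumption: 4 KiB pages */

    /* Largest power-of-two divisor of block_size, capped at the page size,
     * mirroring min(1U << __ffs(block_size), (unsigned)PAGE_SIZE). */
    static unsigned slab_align(unsigned block_size)
    {
            unsigned align = 1u << __builtin_ctz(block_size);   /* block_size > 0 */

            return align < ASSUMED_PAGE_SIZE ? align : ASSUMED_PAGE_SIZE;
    }

    int main(void)
    {
            unsigned sizes[] = { 512, 1536, 3072, 4096, 12288 };
            unsigned i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("block_size %5u -> align %4u\n",
                           sizes[i], slab_align(sizes[i]));
            return 0;
    }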

drivers/md/dm-cache-background-tracker.c
@@ -166,7 +166,7 @@ static bool max_work_reached(struct background_tracker *b)
                atomic_read(&b->pending_demotes) >= b->max_work;
 }
 
-struct bt_work *alloc_work(struct background_tracker *b)
+static struct bt_work *alloc_work(struct background_tracker *b)
 {
        if (max_work_reached(b))
                return NULL;
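
Background for the one-word change: sparse flags any function definition that
has external linkage but no prototype in a header. A minimal standalone
illustration of the pattern (hypothetical helper, not kernel code):

    #include <stdio.h>

    /* Without "static", sparse reports roughly:
     *   warning: symbol 'below_limit' was not declared. Should it be static?
     * Internal linkage, which the patch gives alloc_work(), silences it. */
    static int below_limit(int queued, int max_work)
    {
            return queued < max_work;
    }

    int main(void)
    {
            printf("%d\n", below_limit(3, 16));
            return 0;
    }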

drivers/md/dm-integrity.c
@@ -2440,7 +2440,7 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str
        unsigned i;
 
        for (i = 0; i < ic->journal_sections; i++)
                kvfree(sl[i]);
-       kfree(sl);
+       kvfree(sl);
 }
 
 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
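
The rule behind the fix: memory from the kvmalloc() family may actually be
backed by vmalloc(), so it must be released with kvfree(); kfree() is only
correct for kmalloc()-backed memory. A kernel-style sketch of the pairing
(hypothetical helpers, not from the patch):

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* kvmalloc_array() falls back to vmalloc() for large requests,
     * so the matching free must always be kvfree(). */
    static u32 *alloc_table(size_t nr_entries)
    {
            return kvmalloc_array(nr_entries, sizeof(u32), GFP_KERNEL);
    }

    static void free_table(u32 *table)
    {
            kvfree(table);          /* never kfree() here */
    }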

drivers/md/dm-raid1.c
@@ -23,6 +23,8 @@
 
 #define MAX_RECOVERY 1  /* Maximum number of regions recovered in parallel. */
 
+#define MAX_NR_MIRRORS  (DM_KCOPYD_MAX_REGIONS + 1)
+
 #define DM_RAID1_HANDLE_ERRORS  0x01
 #define DM_RAID1_KEEP_LOG       0x02
 #define errors_handled(p)       ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -255,7 +257,7 @@ static int mirror_flush(struct dm_target *ti)
        unsigned long error_bits;
 
        unsigned int i;
-       struct dm_io_region io[ms->nr_mirrors];
+       struct dm_io_region io[MAX_NR_MIRRORS];
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
@@ -651,7 +653,7 @@ static void write_callback(unsigned long error, void *context)
 static void do_write(struct mirror_set *ms, struct bio *bio)
 {
        unsigned int i;
-       struct dm_io_region io[ms->nr_mirrors], *dest = io;
+       struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
@@ -1083,7 +1085,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        argc -= args_used;
 
        if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
-           nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
+           nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
                ti->error = "Invalid number of mirrors";
                dm_dirty_log_destroy(dl);
                return -EINVAL;
@@ -1404,7 +1406,7 @@ static void mirror_status(struct dm_target *ti, status_type_t type,
        int num_feature_args = 0;
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
-       char buffer[ms->nr_mirrors + 1];
+       char buffer[MAX_NR_MIRRORS + 1];
 
        switch (type) {
        case STATUSTYPE_INFO:
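
The VLA removal trades runtime-sized stack arrays for the compile-time worst
case plus an explicit bound check. A standalone sketch of the pattern
(userspace C, hypothetical numbers; MAX_REGIONS plays the role of
MAX_NR_MIRRORS):

    #include <stdio.h>

    #define MAX_REGIONS 8   /* assumed compile-time upper bound */

    static int sum_regions(const int *sizes, unsigned nr)
    {
            int scratch[MAX_REGIONS];       /* was the VLA: int scratch[nr]; */
            int total = 0;
            unsigned i;

            if (nr > MAX_REGIONS)           /* bound the VLA never enforced */
                    return -1;
            for (i = 0; i < nr; i++) {
                    scratch[i] = sizes[i];
                    total += scratch[i];
            }
            return total;
    }

    int main(void)
    {
            int sizes[] = { 4, 8, 16 };

            printf("%d\n", sum_regions(sizes, 3));
            return 0;
    }

In the patch itself, the nr_mirrors range check in the mirror_ctr() hunk is
what keeps the fixed-size io[] and buffer[] arrays safe.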

drivers/md/dm.c
@@ -1020,7 +1020,8 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
-               sector_t sector, int *srcu_idx)
+                                               sector_t sector, int *srcu_idx)
+       __acquires(md->io_barrier)
 {
        struct dm_table *map;
        struct dm_target *ti;
@@ -1037,7 +1038,7 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
 }
 
 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
-               long nr_pages, void **kaddr, pfn_t *pfn)
+                                long nr_pages, void **kaddr, pfn_t *pfn)
 {
        struct mapped_device *md = dax_get_private(dax_dev);
        sector_t sector = pgoff * PAGE_SECTORS;
@@ -1065,7 +1066,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 }
 
 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
-               void *addr, size_t bytes, struct iov_iter *i)
+                                   void *addr, size_t bytes, struct iov_iter *i)
 {
        struct mapped_device *md = dax_get_private(dax_dev);
        sector_t sector = pgoff * PAGE_SECTORS;
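
__acquires() is one of sparse's context annotations: it records that the
function returns with the SRCU read side of md->io_barrier held, so unbalanced
callers are reported when the tree is checked with "make C=1". A kernel-style
sketch of a matched pair (hypothetical helpers, not from the patch):

    #include <linux/srcu.h>

    /* Tell sparse the SRCU read lock is taken here and dropped in the
     * matching helper, so lock/unlock imbalance in callers is flagged. */
    static int table_read_lock(struct srcu_struct *sp)
            __acquires(sp)
    {
            return srcu_read_lock(sp);
    }

    static void table_read_unlock(struct srcu_struct *sp, int idx)
            __releases(sp)
    {
            srcu_read_unlock(sp, idx);
    }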