dm.c 83.9 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1 2
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
Milan Broz's avatar
Milan Broz committed
3
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
Linus Torvalds's avatar
Linus Torvalds committed
4 5 6 7 8
 *
 * This file is released under the GPL.
 */

#include "dm.h"
Mike Anderson's avatar
Mike Anderson committed
9
#include "dm-uevent.h"
Linus Torvalds's avatar
Linus Torvalds committed
10 11 12

#include <linux/init.h>
#include <linux/module.h>
13
#include <linux/mutex.h>
Linus Torvalds's avatar
Linus Torvalds committed
14 15 16 17 18 19
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
Darrick J. Wong's avatar
Darrick J. Wong committed
20
#include <linux/hdreg.h>
21
#include <linux/delay.h>
22
#include <linux/wait.h>
23
#include <linux/kthread.h>
24
#include <linux/ktime.h>
25
#include <linux/elevator.h> /* for rq_end_sector() */
26
#include <linux/blk-mq.h>
27 28

#include <trace/events/block.h>
Linus Torvalds's avatar
Linus Torvalds committed
29

30 31
#define DM_MSG_PREFIX "core"

Namhyung Kim's avatar
Namhyung Kim committed
32 33 34 35 36 37 38 39 40 41
#ifdef CONFIG_PRINTK
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif

42 43 44 45 46 47 48
/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

Linus Torvalds's avatar
Linus Torvalds committed
49 50 51 52 53
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

54 55
static DEFINE_IDR(_minor_idr);

56
static DEFINE_SPINLOCK(_minor_lock);
57 58 59 60 61

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

62 63
static struct workqueue_struct *deferred_remove_workqueue;

Linus Torvalds's avatar
Linus Torvalds committed
64
/*
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
65
 * For bio-based dm.
Linus Torvalds's avatar
Linus Torvalds committed
66 67 68 69 70 71
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
Richard Kennedy's avatar
Richard Kennedy committed
72
	struct bio *bio;
73
	unsigned long start_time;
74
	spinlock_t endio_lock;
Mikulas Patocka's avatar
Mikulas Patocka committed
75
	struct dm_stats_aux stats_aux;
Linus Torvalds's avatar
Linus Torvalds committed
76 77
};

Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
78 79 80 81 82 83 84
/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
85
	struct request *orig, *clone;
86
	struct kthread_work work;
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
87 88 89 90 91
	int error;
	union map_info info;
};

/*
92 93 94 95 96 97
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
98 99 100
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
101
	struct dm_rq_target_io *tio;
102
	struct bio clone;
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
103 104
};

105 106 107 108 109 110 111 112
union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

113 114
#define MINOR_ALLOCED ((void *)-1)

Linus Torvalds's avatar
Linus Torvalds committed
115 116 117
/*
 * Bits for the md->flags field.
 */
118
#define DMF_BLOCK_IO_FOR_SUSPEND 0
Linus Torvalds's avatar
Linus Torvalds committed
119
#define DMF_SUSPENDED 1
120
#define DMF_FROZEN 2
Jeff Mahoney's avatar
Jeff Mahoney committed
121
#define DMF_FREEING 3
122
#define DMF_DELETING 4
123
#define DMF_NOFLUSH_SUSPENDING 5
124
#define DMF_MERGE_IS_OPTIONAL 6
125
#define DMF_DEFERRED_REMOVE 7
126
#define DMF_SUSPENDED_INTERNALLY 8
Linus Torvalds's avatar
Linus Torvalds committed
127

128 129 130 131 132 133 134 135
/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};

136 137 138
/*
 * Work processed by per-device workqueue.
 */
Linus Torvalds's avatar
Linus Torvalds committed
139
struct mapped_device {
140
	struct srcu_struct io_barrier;
141
	struct mutex suspend_lock;
Linus Torvalds's avatar
Linus Torvalds committed
142
	atomic_t holders;
143
	atomic_t open_count;
Linus Torvalds's avatar
Linus Torvalds committed
144

145 146 147 148 149
	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
150
	struct dm_table __rcu *map;
151

152 153 154
	struct list_head table_devices;
	struct mutex table_devices_lock;

Linus Torvalds's avatar
Linus Torvalds committed
155 156
	unsigned long flags;

157
	struct request_queue *queue;
158
	unsigned type;
159
	/* Protect queue and type against concurrent access. */
160 161
	struct mutex type_lock;

162 163
	struct target_type *immutable_target_type;

Linus Torvalds's avatar
Linus Torvalds committed
164
	struct gendisk *disk;
Mike Anderson's avatar
Mike Anderson committed
165
	char name[16];
Linus Torvalds's avatar
Linus Torvalds committed
166 167 168 169 170 171

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
172
	atomic_t pending[2];
Linus Torvalds's avatar
Linus Torvalds committed
173
	wait_queue_head_t wait;
174
	struct work_struct work;
175
	struct bio_list deferred;
176
	spinlock_t deferred_lock;
Linus Torvalds's avatar
Linus Torvalds committed
177

178
	/*
179
	 * Processing queue (flush)
180 181 182
	 */
	struct workqueue_struct *wq;

Linus Torvalds's avatar
Linus Torvalds committed
183 184 185 186
	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
187
	mempool_t *rq_pool;
Linus Torvalds's avatar
Linus Torvalds committed
188

Stefan Bader's avatar
Stefan Bader committed
189 190
	struct bio_set *bs;

Linus Torvalds's avatar
Linus Torvalds committed
191 192 193 194 195
	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
Mike Anderson's avatar
Mike Anderson committed
196 197 198
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */
Linus Torvalds's avatar
Linus Torvalds committed
199 200 201 202 203

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
204
	struct block_device *bdev;
Darrick J. Wong's avatar
Darrick J. Wong committed
205 206 207

	/* forced geometry settings */
	struct hd_geometry geometry;
Milan Broz's avatar
Milan Broz committed
208

209 210
	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;
211

212 213
	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;
Mikulas Patocka's avatar
Mikulas Patocka committed
214

215 216 217
	/* the number of internal suspends */
	unsigned internal_suspend_count;

Mikulas Patocka's avatar
Mikulas Patocka committed
218
	struct dm_stats stats;
219 220 221

	struct kthread_worker kworker;
	struct task_struct *kworker_task;
222 223

	/* for request-based merge heuristic in dm_request_fn() */
224
	unsigned seq_rq_merge_deadline_usecs;
225
	int last_rq_rw;
226 227
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;
228 229 230

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set tag_set;
231
	bool use_blk_mq;
Linus Torvalds's avatar
Linus Torvalds committed
232 233
};

234 235 236 237 238 239 240 241 242 243 244
#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}

Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
245 246 247 248 249
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
250
	mempool_t *rq_pool;
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
251 252 253
	struct bio_set *bs;
};

254 255 256 257 258 259
struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

260 261
#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
262
#define RESERVED_MAX_IOS		1024
263
static struct kmem_cache *_io_cache;
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
264
static struct kmem_cache *_rq_tio_cache;
265
static struct kmem_cache *_rq_cache;
266

267 268 269 270 271
/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

272 273 274 275 276
/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

277
static unsigned __dm_get_module_param(unsigned *module_param,
278 279
				      unsigned def, unsigned max)
{
280 281
	unsigned param = ACCESS_ONCE(*module_param);
	unsigned modified_param = 0;
282

283 284 285 286
	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;
287

288 289 290
	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
291 292
	}

293
	return param;
294 295
}

296 297
unsigned dm_get_reserved_bio_based_ios(void)
{
298
	return __dm_get_module_param(&reserved_bio_based_ios,
299 300 301 302
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

303 304
unsigned dm_get_reserved_rq_based_ios(void)
{
305
	return __dm_get_module_param(&reserved_rq_based_ios,
306 307 308 309
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

Linus Torvalds's avatar
Linus Torvalds committed
310 311
static int __init local_init(void)
{
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
312
	int r = -ENOMEM;
Linus Torvalds's avatar
Linus Torvalds committed
313 314

	/* allocate a slab for the dm_ios */
Alasdair G Kergon's avatar
Alasdair G Kergon committed
315
	_io_cache = KMEM_CACHE(dm_io, 0);
Linus Torvalds's avatar
Linus Torvalds committed
316
	if (!_io_cache)
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
317
		return r;
Linus Torvalds's avatar
Linus Torvalds committed
318

Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
319 320
	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
321
		goto out_free_io_cache;
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
322

323 324 325 326 327
	_rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

Mike Anderson's avatar
Mike Anderson committed
328
	r = dm_uevent_init();
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
329
	if (r)
330
		goto out_free_rq_cache;
Mike Anderson's avatar
Mike Anderson committed
331

332 333 334 335 336 337
	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

Linus Torvalds's avatar
Linus Torvalds committed
338 339
	_major = major;
	r = register_blkdev(_major, _name);
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
340
	if (r < 0)
341
		goto out_free_workqueue;
Linus Torvalds's avatar
Linus Torvalds committed
342 343 344 345 346

	if (!_major)
		_major = r;

	return 0;
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
347

348 349
out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
350 351
out_uevent_exit:
	dm_uevent_exit();
352 353
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
354 355
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
356 357 358 359
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
Linus Torvalds's avatar
Linus Torvalds committed
360 361 362 363
}

static void local_exit(void)
{
364
	flush_scheduled_work();
365
	destroy_workqueue(deferred_remove_workqueue);
366

367
	kmem_cache_destroy(_rq_cache);
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
368
	kmem_cache_destroy(_rq_tio_cache);
Linus Torvalds's avatar
Linus Torvalds committed
369
	kmem_cache_destroy(_io_cache);
370
	unregister_blkdev(_major, _name);
Mike Anderson's avatar
Mike Anderson committed
371
	dm_uevent_exit();
Linus Torvalds's avatar
Linus Torvalds committed
372 373 374 375 376 377

	_major = 0;

	DMINFO("cleaned up");
}

378
static int (*_inits[])(void) __initdata = {
Linus Torvalds's avatar
Linus Torvalds committed
379 380 381 382
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
383
	dm_io_init,
384
	dm_kcopyd_init,
Linus Torvalds's avatar
Linus Torvalds committed
385
	dm_interface_init,
Mikulas Patocka's avatar
Mikulas Patocka committed
386
	dm_statistics_init,
Linus Torvalds's avatar
Linus Torvalds committed
387 388
};

389
static void (*_exits[])(void) = {
Linus Torvalds's avatar
Linus Torvalds committed
390 391 392 393
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
394
	dm_io_exit,
395
	dm_kcopyd_exit,
Linus Torvalds's avatar
Linus Torvalds committed
396
	dm_interface_exit,
Mikulas Patocka's avatar
Mikulas Patocka committed
397
	dm_statistics_exit,
Linus Torvalds's avatar
Linus Torvalds committed
398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
427 428 429 430 431

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
Linus Torvalds's avatar
Linus Torvalds committed
432 433 434 435 436
}

/*
 * Block device functions
 */
437 438 439 440 441
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

Al Viro's avatar
Al Viro committed
442
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
Linus Torvalds's avatar
Linus Torvalds committed
443 444 445
{
	struct mapped_device *md;

Jeff Mahoney's avatar
Jeff Mahoney committed
446 447
	spin_lock(&_minor_lock);

Al Viro's avatar
Al Viro committed
448
	md = bdev->bd_disk->private_data;
Jeff Mahoney's avatar
Jeff Mahoney committed
449 450 451
	if (!md)
		goto out;

452
	if (test_bit(DMF_FREEING, &md->flags) ||
453
	    dm_deleting_md(md)) {
Jeff Mahoney's avatar
Jeff Mahoney committed
454 455 456 457
		md = NULL;
		goto out;
	}

Linus Torvalds's avatar
Linus Torvalds committed
458
	dm_get(md);
459
	atomic_inc(&md->open_count);
Jeff Mahoney's avatar
Jeff Mahoney committed
460 461 462 463
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
Linus Torvalds's avatar
Linus Torvalds committed
464 465
}

466
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
Linus Torvalds's avatar
Linus Torvalds committed
467
{
468
	struct mapped_device *md;
469

470 471
	spin_lock(&_minor_lock);

472 473 474 475
	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

476 477
	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
478
		queue_work(deferred_remove_workqueue, &deferred_remove_work);
479

Linus Torvalds's avatar
Linus Torvalds committed
480
	dm_put(md);
481
out:
482
	spin_unlock(&_minor_lock);
Linus Torvalds's avatar
Linus Torvalds committed
483 484
}

485 486 487 488 489 490 491 492
int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
493
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
494 495 496 497 498
{
	int r = 0;

	spin_lock(&_minor_lock);

499
	if (dm_open_count(md)) {
500
		r = -EBUSY;
501 502 503 504
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
505 506 507 508 509 510 511 512
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533
int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

Mikulas Patocka's avatar
Mikulas Patocka committed
534 535 536 537 538
sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

539 540 541 542 543
struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

Mikulas Patocka's avatar
Mikulas Patocka committed
544 545 546 547 548
struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

Darrick J. Wong's avatar
Darrick J. Wong committed
549 550 551 552 553 554 555
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

Al Viro's avatar
Al Viro committed
556
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
557 558
			unsigned int cmd, unsigned long arg)
{
Al Viro's avatar
Al Viro committed
559
	struct mapped_device *md = bdev->bd_disk->private_data;
560
	int srcu_idx;
561
	struct dm_table *map;
562 563 564
	struct dm_target *tgt;
	int r = -ENOTTY;

565
retry:
566 567
	map = dm_get_live_table(md, &srcu_idx);

568 569 570 571 572 573 574 575
	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
576 577
	if (!tgt->type->ioctl)
		goto out;
578

579
	if (dm_suspended_md(md)) {
580 581 582 583
		r = -EAGAIN;
		goto out;
	}

584
	r = tgt->type->ioctl(tgt, cmd, arg);
585 586

out:
587
	dm_put_live_table(md, srcu_idx);
588

589 590 591 592 593
	if (r == -ENOTCONN) {
		msleep(10);
		goto retry;
	}

594 595 596
	return r;
}

Alasdair G Kergon's avatar
Alasdair G Kergon committed
597
static struct dm_io *alloc_io(struct mapped_device *md)
Linus Torvalds's avatar
Linus Torvalds committed
598 599 600 601
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

Alasdair G Kergon's avatar
Alasdair G Kergon committed
602
static void free_io(struct mapped_device *md, struct dm_io *io)
Linus Torvalds's avatar
Linus Torvalds committed
603 604 605 606
{
	mempool_free(io, md->io_pool);
}

Alasdair G Kergon's avatar
Alasdair G Kergon committed
607
static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
Linus Torvalds's avatar
Linus Torvalds committed
608
{
609
	bio_put(&tio->clone);
Linus Torvalds's avatar
Linus Torvalds committed
610 611
}

612 613
static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
614
{
615
	return mempool_alloc(md->io_pool, gfp_mask);
616 617 618 619
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
620
	mempool_free(tio, tio->md->io_pool);
621 622
}

623 624 625 626 627 628 629 630 631 632 633
static struct request *alloc_clone_request(struct mapped_device *md,
					   gfp_t gfp_mask)
{
	return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_clone_request(struct mapped_device *md, struct request *rq)
{
	mempool_free(rq, md->rq_pool);
}

634 635 636 637 638 639
static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

640 641 642
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
Mikulas Patocka's avatar
Mikulas Patocka committed
643
	struct bio *bio = io->bio;
Tejun Heo's avatar
Tejun Heo committed
644
	int cpu;
Mikulas Patocka's avatar
Mikulas Patocka committed
645
	int rw = bio_data_dir(bio);
646 647 648

	io->start_time = jiffies;

649 650 651
	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
652 653
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		atomic_inc_return(&md->pending[rw]));
Mikulas Patocka's avatar
Mikulas Patocka committed
654 655

	if (unlikely(dm_stats_used(&md->stats)))
656
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
Mikulas Patocka's avatar
Mikulas Patocka committed
657
				    bio_sectors(bio), false, 0, &io->stats_aux);
658 659
}

660
static void end_io_acct(struct dm_io *io)
661 662 663 664
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
665
	int pending;
666 667
	int rw = bio_data_dir(bio);

668
	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
669

Mikulas Patocka's avatar
Mikulas Patocka committed
670
	if (unlikely(dm_stats_used(&md->stats)))
671
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
Mikulas Patocka's avatar
Mikulas Patocka committed
672 673
				    bio_sectors(bio), true, duration, &io->stats_aux);

674 675
	/*
	 * After this is decremented the bio must not be touched if it is
676
	 * a flush.
677
	 */
678 679
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
680
	pending += atomic_read(&md->pending[rw^0x1]);
681

682 683 684
	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
685 686
}

Linus Torvalds's avatar
Linus Torvalds committed
687 688 689
/*
 * Add the bio to the list of deferred io.
 */
Mikulas Patocka's avatar
Mikulas Patocka committed
690
static void queue_io(struct mapped_device *md, struct bio *bio)
Linus Torvalds's avatar
Linus Torvalds committed
691
{
692
	unsigned long flags;
Linus Torvalds's avatar
Linus Torvalds committed
693

694
	spin_lock_irqsave(&md->deferred_lock, flags);
Linus Torvalds's avatar
Linus Torvalds committed
695
	bio_list_add(&md->deferred, bio);
696
	spin_unlock_irqrestore(&md->deferred_lock, flags);
697
	queue_work(md->wq, &md->work);
Linus Torvalds's avatar
Linus Torvalds committed
698 699 700 701 702
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
703
 * dm_put_live_table() when finished.
Linus Torvalds's avatar
Linus Torvalds committed
704
 */
705
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
Linus Torvalds's avatar
Linus Torvalds committed
706
{
707 708 709 710
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}
Linus Torvalds's avatar
Linus Torvalds committed
711

712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731
void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}
Linus Torvalds's avatar
Linus Torvalds committed
732

733 734 735
static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
Linus Torvalds's avatar
Linus Torvalds committed
736 737
}

738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	td->dm_dev.bdev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode) {
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		atomic_set(&td->count, 0);
		list_add(&td->list, &md->table_devices);
	}
	atomic_inc(&td->count);
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (atomic_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
		kfree(td);
	}
}

Darrick J. Wong's avatar
Darrick J. Wong committed
852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

Linus Torvalds's avatar
Linus Torvalds committed
879 880 881 882 883 884 885 886 887
/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

888 889 890 891 892
static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

Linus Torvalds's avatar
Linus Torvalds committed
893 894 895 896
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
897
static void dec_pending(struct dm_io *io, int error)
Linus Torvalds's avatar
Linus Torvalds committed
898
{
899
	unsigned long flags;
900 901 902
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;
903 904

	/* Push-back supersedes any I/O errors */
905 906 907 908 909 910
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}
Linus Torvalds's avatar
Linus Torvalds committed
911 912

	if (atomic_dec_and_test(&io->io_count)) {
913 914 915 916
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
917
			spin_lock_irqsave(&md->deferred_lock, flags);
918 919 920
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
921 922
				/* noflush suspend was interrupted. */
				io->error = -EIO;
923
			spin_unlock_irqrestore(&md->deferred_lock, flags);
924 925
		}

926 927
		io_error = io->error;
		bio = io->bio;
928 929 930 931 932
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;
933

934
		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
935
			/*
936 937
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
938
			 */
939 940
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
941
		} else {
942
			/* done with normal IO or empty flush */
943
			trace_block_bio_complete(md->queue, bio, io_error);
944
			bio_endio(bio, io_error);
945
		}
Linus Torvalds's avatar
Linus Torvalds committed
946 947 948
	}
}

949 950 951 952 953 954 955 956
static void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

957
static void clone_endio(struct bio *bio, int error)
Linus Torvalds's avatar
Linus Torvalds committed
958
{
959
	int r = error;
Mikulas Patocka's avatar
Mikulas Patocka committed
960
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
961
	struct dm_io *io = tio->io;
Stefan Bader's avatar
Stefan Bader committed
962
	struct mapped_device *md = tio->io->md;
Linus Torvalds's avatar
Linus Torvalds committed
963 964 965 966 967 968
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
Mikulas Patocka's avatar
Mikulas Patocka committed
969
		r = endio(tio->ti, bio, error);
970 971 972 973 974
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
Linus Torvalds's avatar
Linus Torvalds committed
975
			error = r;
976 977
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
978
			return;
979 980 981 982
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
Linus Torvalds's avatar
Linus Torvalds committed
983 984
	}

985 986 987 988
	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
		disable_write_same(md);

Stefan Bader's avatar
Stefan Bader committed
989
	free_tio(md, tio);
990
	dec_pending(io, error);
Linus Torvalds's avatar
Linus Torvalds committed
991 992
}

993 994 995 996 997
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
Mikulas Patocka's avatar
Mikulas Patocka committed
998 999
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
1000 1001
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
1002
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notice the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

1044 1045 1046 1047 1048
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}

1049 1050 1051 1052 1053
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
1054
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
1055
{
1056
	atomic_dec(&md->pending[rw]);
1057 1058

	/* nudge anyone waiting on suspend queue */
1059
	if (!md_in_flight(md))
1060 1061
		wake_up(&md->wait);

1062 1063 1064 1065 1066 1067
	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
1068
	if (run_queue) {
1069 1070
		if (md->queue->mq_ops)
			blk_mq_run_hw_queues(md->queue, true);
1071
		else
1072 1073
			blk_run_queue_async(md->queue);
	}
1074 1075 1076 1077 1078 1079 1080

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

1081
static void free_rq_clone(struct request *clone)
1082 1083
{
	struct dm_rq_target_io *tio = clone->end_io_data;
1084
	struct mapped_device *md = tio->md;
1085 1086

	blk_rq_unprep_clone(clone);
1087

1088 1089
	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
		/* stacked on blk-mq queue(s) */
1090
		tio->ti->type->release_clone_rq(clone);
1091 1092
	else if (!md->queue->mq_ops)
		/* request_fn queue stacked on request_fn queue(s) */
1093
		free_clone_request(md, clone);
1094 1095 1096 1097 1098
	/*
	 * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
	 * no need to call free_clone_request() because we leverage blk-mq by
	 * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
	 */
1099 1100 1101

	if (!md->queue->mq_ops)
		free_rq_tio(tio);
1102 1103
}

Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
1104 1105
/*
 * Complete the clone and the original request.
1106 1107
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
1108 1109 1110 1111 1112 1113 1114 1115
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

1116
	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

1129
	free_rq_clone(clone);
1130 1131 1132 1133
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
1134
	rq_completed(md, rw, true);
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
1135 1136
}

1137 1138
static void dm_unprep_request(struct request *rq)
{
1139
	struct dm_rq_target_io *tio = tio_from_request(rq);
1140
	struct request *clone = tio->clone;
1141

1142 1143 1144 1145
	if (!rq->q->mq_ops) {
		rq->special = NULL;
		rq->cmd_flags &= ~REQ_DONTPREP;
	}
1146

1147
	if (clone)
1148
		free_rq_clone(clone);
1149 1150 1151 1152 1153
}

/*
 * Requeue the original request of a clone.
 */
1154
static void old_requeue_request(struct request *rq)
1155 1156 1157 1158 1159 1160
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
Junichi Nomura's avatar
Junichi Nomura committed
1161
	blk_run_queue_async(q);
1162
	spin_unlock_irqrestore(q->queue_lock, flags);
1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177
}

static void dm_requeue_unmapped_original_request(struct mapped_device *md,
						 struct request *rq)
{
	int rw = rq_data_dir(rq);

	dm_unprep_request(rq);

	if (!rq->q->mq_ops)
		old_requeue_request(rq);
	else {
		blk_mq_requeue_request(rq);
		blk_mq_kick_requeue_list(rq->q);
	}
1178

1179 1180 1181 1182 1183 1184 1185 1186
	rq_completed(md, rw, false);
}

static void dm_requeue_unmapped_request(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_requeue_unmapped_original_request(tio->md, tio->orig);
1187 1188
}

1189
static void old_stop_queue(struct request_queue *q)
1190 1191 1192
{
	unsigned long flags;

1193 1194 1195
	if (blk_queue_stopped(q))
		return;

1196
	spin_lock_irqsave(q->queue_lock, flags);
1197
	blk_stop_queue(q);
1198 1199 1200
	spin_unlock_irqrestore(q->queue_lock, flags);
}

1201
static void stop_queue(struct request_queue *q)
1202
{
1203 1204 1205 1206
	if (!q->mq_ops)
		old_stop_queue(q);
	else
		blk_mq_stop_hw_queues(q);
1207 1208
}

1209
static void old_start_queue(struct request_queue *q)
1210 1211 1212 1213
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
1214 1215
	if (blk_queue_stopped(q))
		blk_start_queue(q);
1216 1217 1218
	spin_unlock_irqrestore(q->queue_lock, flags);
}

1219 1220 1221 1222 1223 1224 1225 1226
static void start_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		old_start_queue(q);
	else
		blk_mq_start_stopped_hw_queues(q, true);
}

1227
static void dm_done(struct request *clone, int error, bool mapped)
1228
{
1229
	int r = error;
1230
	struct dm_rq_target_io *tio = clone->end_io_data;
1231
	dm_request_endio_fn rq_end_io = NULL;
1232

1233 1234 1235 1236 1237 1238
	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}
1239

1240 1241 1242 1243
	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
		     !clone->q->limits.max_write_same_sectors))
		disable_write_same(tio->md);

1244
	if (r <= 0)
1245
		/* The target wants to complete the I/O */
1246 1247
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
1248 1249
		/* The target will handle the I/O */
		return;
1250
	else if (r == DM_ENDIO_REQUEUE)
1251 1252 1253
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
1254
		DMWARN("unimplemented target endio return value: %d", r);
1255 1256 1257 1258
		BUG();
	}
}

1259 1260 1261 1262 1263 1264
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
1265
	struct dm_rq_target_io *tio = tio_from_request(rq);
1266
	struct request *clone = tio->clone;
1267
	int rw;
1268

1269
	if (!clone) {
1270 1271 1272 1273 1274 1275 1276 1277 1278
		rw = rq_data_dir(rq);
		if (!rq->q->mq_ops) {
			blk_end_request_all(rq, tio->error);
			rq_completed(tio->md, rw, false);
			free_rq_tio(tio);
		} else {
			blk_mq_end_request(rq, tio->error);
			rq_completed(tio->md, rw, false);
		}
1279 1280
		return;
	}
1281 1282 1283 1284 1285 1286 1287

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

1288 1289 1290 1291
/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
1292
static void dm_complete_request(struct request *rq, int error)
1293
{
1294
	struct dm_rq_target_io *tio = tio_from_request(rq);
1295 1296 1297 1298 1299 1300 1301 1302 1303

	tio->error = error;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
1304
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
1305
 */
1306
static void dm_kill_unmapped_request(struct request *rq, int error)
1307 1308
{
	rq->cmd_flags |= REQ_FAILED;
1309
	dm_complete_request(rq, error);
1310 1311 1312
}

/*
1313
 * Called with the clone's queue lock held (for non-blk-mq)
1314 1315 1316
 */
static void end_clone_request(struct request *clone, int error)
{
1317 1318
	struct dm_rq_target_io *tio = clone->end_io_data;

1319 1320 1321 1322 1323 1324 1325 1326 1327
	if (!clone->q->mq_ops) {
		/*
		 * For just cleaning up the information of the queue in which
		 * the clone was dispatched.
		 * The clone is *NOT* freed actually here because it is alloced
		 * from dm own mempool (REQ_ALLOCED isn't set).
		 */
		__blk_put_request(clone->q, clone);
	}
1328 1329 1330

	/*
	 * Actual request completion is done in a softirq context which doesn't
1331
	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
1332 1333 1334
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
1335
	 *       against this clone's queue
1336
	 */
1337
	dm_complete_request(tio->orig, error);
1338 1339
}

1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351
/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
Linus Torvalds's avatar
Linus Torvalds committed
1352
{
1353
	sector_t len = max_io_len_target_boundary(sector, ti);
1354
	sector_t offset, max_len;
Linus Torvalds's avatar
Linus Torvalds committed
1355 1356

	/*
1357
	 * Does the target need to split even further?
Linus Torvalds's avatar
Linus Torvalds committed
1358
	 */
1359 1360 1361 1362 1363 1364 1365 1366 1367 1368
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
Linus Torvalds's avatar
Linus Torvalds committed
1369 1370 1371 1372 1373
	}

	return len;
}

1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428
/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_FLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_rw & REQ_FLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

Alasdair G Kergon's avatar
Alasdair G Kergon committed
1429
static void __map_bio(struct dm_target_io *tio)
Linus Torvalds's avatar
Linus Torvalds committed
1430 1431
{
	int r;
1432
	sector_t sector;
Stefan Bader's avatar
Stefan Bader committed
1433
	struct mapped_device *md;
1434
	struct bio *clone = &tio->clone;
Alasdair G Kergon's avatar
Alasdair G Kergon committed
1435
	struct dm_target *ti = tio->ti;
Linus Torvalds's avatar
Linus Torvalds committed
1436 1437 1438 1439 1440 1441 1442 1443 1444

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
1445
	sector = clone->bi_iter.bi_sector;
Mikulas Patocka's avatar
Mikulas Patocka committed
1446
	r = ti->type->map(ti, clone);
1447
	if (r == DM_MAPIO_REMAPPED) {
Linus Torvalds's avatar
Linus Torvalds committed
1448
		/* the bio has been remapped so dispatch it */
1449

1450 1451
		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
				      tio->io->bio->bi_bdev->bd_dev, sector);
1452

Linus Torvalds's avatar
Linus Torvalds committed
1453
		generic_make_request(clone);
1454 1455
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
Stefan Bader's avatar
Stefan Bader committed
1456 1457 1458
		md = tio->io->md;
		dec_pending(tio->io, r);
		free_tio(md, tio);
1459 1460 1461
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
Linus Torvalds's avatar
Linus Torvalds committed
1462 1463 1464 1465 1466 1467 1468 1469 1470
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
1471
	unsigned sector_count;
Linus Torvalds's avatar
Linus Torvalds committed
1472 1473
};

1474
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
Alasdair G Kergon's avatar
Alasdair G Kergon committed
1475
{
1476 1477
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
Linus Torvalds's avatar
Linus Torvalds committed
1478 1479 1480 1481 1482
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
1483
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1484
		      sector_t sector, unsigned len)
Linus Torvalds's avatar
Linus Torvalds committed
1485
{
1486
	struct bio *clone = &tio->clone;
Linus Torvalds's avatar
Linus Torvalds committed
1487

1488 1489 1490 1491
	__bio_clone_fast(clone, bio);

	if (bio_integrity(bio))
		bio_integrity_clone(clone, bio, GFP_NOIO);
Alasdair G Kergon's avatar
Alasdair G Kergon committed
1492

1493 1494 1495 1496 1497
	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	if (bio_integrity(bio))
		bio_integrity_trim(clone, 0, len);
Linus Torvalds's avatar
Linus Torvalds committed
1498 1499
}

1500
static struct dm_target_io *alloc_tio(struct clone_info *ci,
1501
				      struct dm_target *ti,
1502
				      unsigned target_bio_nr)
1503
{
1504 1505 1506
	struct dm_target_io *tio;
	struct bio *clone;

1507
	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
1508
	tio = container_of(clone, struct dm_target_io, clone);
1509 1510 1511

	tio->io = ci->io;
	tio->ti = ti;
1512
	tio->target_bio_nr = target_bio_nr;
1513 1514 1515 1516

	return tio;
}

1517 1518
static void __clone_and_map_simple_bio(struct clone_info *ci,
				       struct dm_target *ti,
1519
				       unsigned target_bio_nr, unsigned *len)
1520
{
1521
	struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
1522
	struct bio *clone = &tio->clone;
1523

1524 1525
	tio->len_ptr = len;

1526
	__bio_clone_fast(clone, ci->bio);
Alasdair G Kergon's avatar
Alasdair G Kergon committed
1527
	if (len)
1528
		bio_setup_sector(clone, ci->sector, *len);
1529

Alasdair G Kergon's avatar
Alasdair G Kergon committed
1530
	__map_bio(tio);
1531 1532
}

1533
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1534
				  unsigned num_bios, unsigned *len)
1535
{
1536
	unsigned target_bio_nr;
1537

1538
	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
1539
		__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
1540 1541
}

1542
static int __send_empty_flush(struct clone_info *ci)
1543
{
1544
	unsigned target_nr = 0;
1545 1546
	struct dm_target *ti;

1547
	BUG_ON(bio_has_data(ci->bio));
1548
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
1549
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1550 1551 1552 1553

	return 0;
}

Alasdair G Kergon's avatar
Alasdair G Kergon committed
1554
static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1555
				     sector_t sector, unsigned *len)
Mike Snitzer's avatar
Mike Snitzer committed
1556
{
1557
	struct bio *bio = ci->bio;
Mike Snitzer's avatar
Mike Snitzer committed
1558
	struct dm_target_io *tio;
1559 1560
	unsigned target_bio_nr;
	unsigned num_target_bios = 1;
Mike Snitzer's avatar
Mike Snitzer committed
1561

1562 1563 1564 1565 1566
	/*
	 * Does the target want to receive duplicate copies of the bio?
	 */
	if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
		num_target_bios = ti->num_write_bios(ti, bio);
Alasdair G Kergon's avatar
Alasdair G Kergon committed
1567

1568
	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1569
		tio = alloc_tio(ci, ti, target_bio_nr);
1570 1571
		tio->len_ptr = len;
		clone_bio(tio, bio, sector, *len);
1572 1573
		__map_bio(tio);
	}
Mike Snitzer's avatar
Mike Snitzer committed
1574 1575
}

1576
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
Mike Snitzer's avatar
Mike Snitzer committed
1577

1578
static unsigned get_num_discard_bios(struct dm_target *ti)
Mike Snitzer's avatar
Mike Snitzer committed
1579
{
1580
	return ti->num_discard_bios;
Mike Snitzer's avatar
Mike Snitzer committed
1581 1582
}

1583
static unsigned get_num_write_same_bios(struct dm_target *ti)
Mike Snitzer's avatar
Mike Snitzer committed
1584
{
1585
	return ti->num_write_same_bios;
Mike Snitzer's avatar
Mike Snitzer committed
1586 1587 1588 1589 1590 1591
}

typedef bool (*is_split_required_fn)(struct dm_target *ti);

static bool is_split_required_for_discard(struct dm_target *ti)
{
1592
	return ti->split_discard_bios;
Mike Snitzer's avatar
Mike Snitzer committed
1593 1594
}

1595 1596 1597
static int __send_changing_extent_only(struct clone_info *ci,
				       get_num_bios_fn get_num_bios,
				       is_split_required_fn is_split_required)
Mike Snitzer's avatar
Mike Snitzer committed
1598 1599
{
	struct dm_target *ti;
1600
	unsigned len;
1601
	unsigned num_bios;
Mike Snitzer's avatar
Mike Snitzer committed
1602

1603 1604 1605 1606
	do {
		ti = dm_table_find_target(ci->map, ci->sector);
		if (!dm_target_is_valid(ti))
			return -EIO;
Mike Snitzer's avatar
Mike Snitzer committed
1607 1608

		/*
Mike Snitzer's avatar
Mike Snitzer committed
1609 1610
		 * Even though the device advertised support for this type of
		 * request, that does not mean every target supports it, and
Mike Snitzer's avatar
Mike Snitzer committed
1611
		 * reconfiguration might also have changed that since the
1612
		 * check was performed.
Mike Snitzer's avatar
Mike Snitzer committed
1613
		 */
1614 1615
		num_bios = get_num_bios ? get_num_bios(ti) : 0;
		if (!num_bios)
1616
			return -EOPNOTSUPP;
Mike Snitzer's avatar
Mike Snitzer committed
1617

Mike Snitzer's avatar
Mike Snitzer committed
1618
		if (is_split_required && !is_split_required(ti))
1619
			len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1620
		else
1621
			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
1622

1623
		__send_duplicate_bios(ci, ti, num_bios, &len);
1624 1625 1626

		ci->sector += len;
	} while (ci->sector_count -= len);
Mike Snitzer's avatar
Mike Snitzer committed
1627 1628 1629 1630

	return 0;
}

1631
static int __send_discard(struct clone_info *ci)
Mike Snitzer's avatar
Mike Snitzer committed
1632
{
1633 1634
	return __send_changing_extent_only(ci, get_num_discard_bios,
					   is_split_required_for_discard);
Mike Snitzer's avatar
Mike Snitzer committed
1635 1636
}

1637
static int __send_write_same(struct clone_info *ci)
Mike Snitzer's avatar
Mike Snitzer committed
1638
{
1639
	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
Mike Snitzer's avatar
Mike Snitzer committed
1640 1641
}

Alasdair G Kergon's avatar
Alasdair G Kergon committed
1642 1643 1644
/*
 * Select the correct strategy for processing a non-flush bio.
 */
1645
static int __split_and_process_non_flush(struct clone_info *ci)
Linus Torvalds's avatar
Linus Torvalds committed
1646
{
1647
	struct bio *bio = ci->bio;
1648
	struct dm_target *ti;
1649
	unsigned len;
Linus Torvalds's avatar
Linus Torvalds committed
1650

Mike Snitzer's avatar
Mike Snitzer committed
1651
	if (unlikely(bio->bi_rw & REQ_DISCARD))
1652
		return __send_discard(ci);
Mike Snitzer's avatar
Mike Snitzer committed
1653
	else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
1654
		return __send_write_same(ci);
Mike Snitzer's avatar
Mike Snitzer committed
1655

1656 1657 1658 1659
	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

1660
	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
Linus Torvalds's avatar
Linus Torvalds committed
1661

1662
	__clone_and_map_data_bio(ci, ti, ci->sector, &len);
Linus Torvalds's avatar
Linus Torvalds committed
1663

1664 1665
	ci->sector += len;
	ci->sector_count -= len;
Linus Torvalds's avatar
Linus Torvalds committed
1666

1667
	return 0;
Linus Torvalds's avatar
Linus Torvalds committed
1668 1669 1670
}

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void __split_and_process_bio(struct mapped_device *md,
				    struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	if (unlikely(!map)) {
		bio_io_error(bio);
		return;
	}

	ci.map = map;
	ci.md = md;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_iter.bi_sector;

	start_io_acct(ci.io);

	if (bio->bi_rw & REQ_FLUSH) {
		ci.bio = &ci.md->flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error)
			error = __split_and_process_non_flush(&ci);
	}

	/* drop the extra reference count */
	dec_pending(ci.io, error);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

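/*
 * merge_bvec_fn for the mapped device: ask the target how many bytes of
 * the proposed bio_vec it can accept at this offset, so bios built by the
 * caller never need splitting beyond what max_io_len() allows.
 */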
static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table_fast(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns the number of bytes the target can accept
	 * at this offset; max_size is the precomputed maximal I/O size.
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	/*
	 * If the target doesn't provide a merge method and some of the
	 * underlying devices supplied their own merge_bvec method (we know
	 * this by looking at queue_max_hw_sectors), then we can't allow bios
	 * with multiple vector entries.  So always set max_size to 0, and the
	 * code below allows just one page.
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
		max_size = 0;

out:
	dm_put_live_table_fast(md);
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static void dm_make_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);

		if (bio_rw(bio) != READA)
			queue_io(md, bio);
		else
			bio_io_error(bio);
		return;
	}

	__split_and_process_bio(md, map, bio);
	dm_put_live_table(md, srcu_idx);
	return;
}

int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}

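/*
 * Pass a prepared clone down to the underlying device's request_queue.
 * If blk_insert_cloned_request() fails, the clone is completed in terms
 * of the original request via dm_complete_request().
 */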
static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	int r;

	if (blk_queue_io_stat(clone->q))
		clone->cmd_flags |= REQ_IO_STAT;

	clone->start_time = jiffies;
	r = blk_insert_cloned_request(clone->q, clone);
	if (r)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

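/*
 * Set up a clone of @rq: the clone's bios are allocated from the md's
 * bioset by blk_rq_prep_clone() and completion is routed back through
 * end_clone_request().
 */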
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->cmd = rq->cmd;
	clone->cmd_len = rq->cmd_len;
	clone->sense = rq->sense;
	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

static struct request *clone_rq(struct request *rq, struct mapped_device *md,
				struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	/*
	 * Do not allocate a clone if tio->clone was already set
	 * (see: dm_mq_queue_rq).
	 */
	bool alloc_clone = !tio->clone;
	struct request *clone;

	if (alloc_clone) {
		clone = alloc_clone_request(md, gfp_mask);
		if (!clone)
			return NULL;
	} else
		clone = tio->clone;

	blk_rq_init(NULL, clone);
	if (setup_clone(clone, rq, tio, gfp_mask)) {
		/* -ENOMEM */
		if (alloc_clone)
			free_clone_request(md, clone);
		return NULL;
	}

	return clone;
}

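/*
 * The helpers below build the per-request dm_rq_target_io.  For tables
 * that are not blk-mq request-based, prep_tio() allocates a clone request
 * up front via clone_rq(); otherwise cloning is left to the target's
 * clone_and_map_rq hook (see map_request()).
 */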
static void map_tio_request(struct kthread_work *work);

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	memset(&tio->info, 0, sizeof(tio->info));
	if (md->kworker_task)
		init_kthread_work(&tio->work, map_tio_request);
}

static struct dm_rq_target_io *prep_tio(struct request *rq,
					struct mapped_device *md, gfp_t gfp_mask)
{
	struct dm_rq_target_io *tio;
	int srcu_idx;
	struct dm_table *table;

	tio = alloc_rq_tio(md, gfp_mask);
	if (!tio)
		return NULL;

	init_tio(tio, rq, md);

	table = dm_get_live_table(md, &srcu_idx);
	if (!dm_table_mq_request_based(table)) {
		if (!clone_rq(rq, md, tio, gfp_mask)) {
			dm_put_live_table(md, srcu_idx);
			free_rq_tio(tio);
			return NULL;
		}
	}
	dm_put_live_table(md, srcu_idx);

	return tio;
}

/*
 * Called with the queue lock held.
 */
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct dm_rq_target_io *tio;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

	tio = prep_tio(rq, md, GFP_ATOMIC);
	if (!tio)
		return BLKPREP_DEFER;

	rq->special = tio;
	rq->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

/*
 * Returns:
 * 0                : the request has been processed
 * DM_MAPIO_REQUEUE : the original request needs to be requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio, struct request *rq,
		       struct mapped_device *md)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct request *clone = NULL;

	if (tio->clone) {
		clone = tio->clone;
		r = ti->type->map_rq(ti, clone, &tio->info);
	} else {
		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
		if (r < 0) {
			/* The target wants to complete the I/O */
			dm_kill_unmapped_request(rq, r);
			return r;
		}
		if (r != DM_MAPIO_REMAPPED)
			return r;
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}
	}

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		dm_dispatch_clone_request(clone, rq);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, r);
		return r;
	}

	return 0;
}

static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
	struct request *rq = tio->orig;
	struct mapped_device *md = tio->md;

	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
		dm_requeue_unmapped_original_request(md, rq);
}

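/*
 * Account the original request as in-flight: start it on the legacy or
 * blk-mq path as appropriate, bump md->pending and take an md reference
 * for the duration of the I/O.
 */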
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	if (!orig->q->mq_ops)
		blk_start_request(orig);
	else
		blk_mq_start_request(orig);
	atomic_inc(&md->pending[rq_data_dir(orig)]);

	if (md->seq_rq_merge_deadline_usecs) {
		md->last_rq_pos = rq_end_sector(orig);
		md->last_rq_rw = rq_data_dir(orig);
		md->last_rq_start_time = ktime_get();
	}

	/*
	 * Hold an md reference here for the in-flight I/O.
	 * We can't rely on the reference count held by the device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

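/*
 * Backing store for the dm_attr_rq_based_seq_io_merge_deadline_* sysfs
 * hooks: the number of microseconds for which dm_request_fn() may hold
 * back a sequential request in the hope of merging it with in-flight I/O.
 * 0 disables the heuristic; values are capped at
 * MAX_SEQ_RQ_MERGE_DEADLINE_USECS.
 */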
#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	unsigned deadline;

	if (!dm_request_based(md) || md->use_blk_mq)
		return count;

	if (kstrtouint(buf, 10, &deadline))
		return -EINVAL;

	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

	md->seq_rq_merge_deadline_usecs = deadline;

	return count;
}

static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
{
	ktime_t kt_deadline;

	if (!md->seq_rq_merge_deadline_usecs)
		return false;

	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

	return !ktime_after(ktime_get(), kt_deadline);
}

/*
 * q->request_fn for request-based dm.
 * Called with the queue lock held.
 */
static void dm_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	int srcu_idx;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	struct dm_target *ti;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos;

	/*
	 * For suspend, check blk_queue_stopped() and increment ->pending
	 * within a single queue_lock so that the number of in-flight I/Os
	 * cannot grow after the queue has been stopped in dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			goto out;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (!(rq->cmd_flags & REQ_FLUSH))
			pos = blk_rq_pos(rq);

		ti = dm_table_find_target(map, pos);
		if (!dm_target_is_valid(ti)) {
			/*
			 * Must perform the setup that rq_completed() requires
			 * before calling dm_kill_unmapped_request().
			 */
			DMERR_LIMIT("request attempted access beyond the end of device");
			dm_start_request(md, rq);
			dm_kill_unmapped_request(rq, -EIO);
			continue;
		}

		if (dm_request_peeked_before_merge_deadline(md) &&
		    md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
		    md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
			goto delay_and_out;

		if (ti->type->busy && ti->type->busy(ti))
			goto delay_and_out;

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		queue_kthread_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}

	goto out;

delay_and_out:
	blk_delay_queue(q, HZ / 100);
out:
	dm_put_live_table(md, srcu_idx);
}

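/*
 * backing_dev_info congested callback.  While suspended only the bits
 * passed in are reported; otherwise the live table (bio-based) or md's
 * own queue (request-based) is consulted.
 */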
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_live_table_fast(md);
		if (map) {
			/*
			 * For request-based dm only the congestion state of
			 * md's own request_queue is relevant.
			 */
			if (dm_request_based(md))
				r = md->queue->backing_dev_info.state &
				    bdi_bits;
			else
				r = dm_table_any_congested(map, bdi_bits);
		}
		dm_put_live_table_fast(md);
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
2174
static void free_minor(int minor)
Linus Torvalds's avatar
Linus Torvalds committed
2175
{
2176
	spin_lock(&_minor_lock);
Linus Torvalds's avatar
Linus Torvalds committed
2177
	idr_remove(&_minor_idr, minor);
2178
	spin_unlock(&_minor_lock);
Linus Torvalds's avatar
Linus Torvalds committed
2179 2180 2181 2182 2183
}

/*
 * See if the device with a specific minor # is free.
 */
2184
static int specific_minor(int minor)
Linus Torvalds's avatar
Linus Torvalds committed
2185
{
Tejun Heo's avatar
Tejun Heo committed
2186
	int r;
Linus Torvalds's avatar
Linus Torvalds committed
2187 2188 2189 2190

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

Tejun Heo's avatar
Tejun Heo committed
2191
	idr_preload(GFP_KERNEL);
2192
	spin_lock(&_minor_lock);
Linus Torvalds's avatar
Linus Torvalds committed
2193

Tejun Heo's avatar
Tejun Heo committed
2194
	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
Linus Torvalds's avatar
Linus Torvalds committed
2195

2196
	spin_unlock(&_minor_lock);
Tejun Heo's avatar
Tejun Heo committed
2197 2198 2199 2200
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
Linus Torvalds's avatar
Linus Torvalds committed
2201 2202
}

2203
static int next_free_minor(int *minor)
Linus Torvalds's avatar
Linus Torvalds committed
2204
{
Tejun Heo's avatar
Tejun Heo committed
2205
	int r;
Jeff Mahoney's avatar
Jeff Mahoney committed
2206

Tejun Heo's avatar
Tejun Heo committed
2207
	idr_preload(GFP_KERNEL);
2208
	spin_lock(&_minor_lock);
Linus Torvalds's avatar
Linus Torvalds committed
2209

Tejun Heo's avatar
Tejun Heo committed
2210
	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
Linus Torvalds's avatar
Linus Torvalds committed
2211

2212
	spin_unlock(&_minor_lock);
Tejun Heo's avatar
Tejun Heo committed
2213 2214 2215 2216 2217
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
Linus Torvalds's avatar
Linus Torvalds committed
2218 2219
}

2220
static const struct block_device_operations dm_blk_dops;
Linus Torvalds's avatar
Linus Torvalds committed
2221

2222 2223
static void dm_wq_work(struct work_struct *work);

2224 2225 2226 2227
static void dm_init_md_queue(struct mapped_device *md)
{
	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
2228
	 * devices.  The type of this dm device may not have been decided yet.
2229 2230 2231 2232 2233 2234 2235
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
2236
}
2237

2238 2239
static void dm_init_old_md_queue(struct mapped_device *md)
{
2240
	md->use_blk_mq = false;
2241 2242 2243 2244 2245
	dm_init_md_queue(md);

	/*
	 * Initialize aspects of queue that aren't relevant for blk-mq
	 */
2246 2247 2248
	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
2249

2250 2251 2252
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
}

Linus Torvalds's avatar
Linus Torvalds committed
2253 2254 2255
/*
 * Allocate and initialise a blank device with a given minor.
 */
2256
static struct mapped_device *alloc_dev(int minor)
Linus Torvalds's avatar
Linus Torvalds committed
2257 2258
{
	int r;
2259
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
2260
	void *old_md;
Linus Torvalds's avatar
Linus Torvalds committed
2261 2262 2263 2264 2265 2266

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

2267
	if (!try_module_get(THIS_MODULE))
Milan Broz's avatar
Milan Broz committed
2268
		goto bad_module_get;
2269

Linus Torvalds's avatar
Linus Torvalds committed
2270
	/* get a minor number for the dev */
2271
	if (minor == DM_ANY_MINOR)
2272
		r = next_free_minor(&minor);
2273
	else
2274
		r = specific_minor(minor);
Linus Torvalds's avatar
Linus Torvalds committed
2275
	if (r < 0)
Milan Broz's avatar
Milan Broz committed
2276
		goto bad_minor;
Linus Torvalds's avatar
Linus Torvalds committed
2277

2278 2279 2280 2281
	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

2282
	md->use_blk_mq = use_blk_mq;
2283
	md->type = DM_TYPE_NONE;
2284
	mutex_init(&md->suspend_lock);
2285
	mutex_init(&md->type_lock);
2286
	mutex_init(&md->table_devices_lock);
2287
	spin_lock_init(&md->deferred_lock);
Linus Torvalds's avatar
Linus Torvalds committed
2288
	atomic_set(&md->holders, 1);
2289
	atomic_set(&md->open_count, 0);
Linus Torvalds's avatar
Linus Torvalds committed
2290
	atomic_set(&md->event_nr, 0);
Mike Anderson's avatar
Mike Anderson committed
2291 2292
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
2293
	INIT_LIST_HEAD(&md->table_devices);
Mike Anderson's avatar
Mike Anderson committed
2294
	spin_lock_init(&md->uevent_lock);
Linus Torvalds's avatar
Linus Torvalds committed
2295

2296
	md->queue = blk_alloc_queue(GFP_KERNEL);
Linus Torvalds's avatar
Linus Torvalds committed
2297
	if (!md->queue)
Milan Broz's avatar
Milan Broz committed
2298
		goto bad_queue;
Linus Torvalds's avatar
Linus Torvalds committed
2299

2300
	dm_init_md_queue(md);
Stefan Bader's avatar
Stefan Bader committed
2301

Linus Torvalds's avatar
Linus Torvalds committed
2302 2303
	md->disk = alloc_disk(1);
	if (!md->disk)
Milan Broz's avatar
Milan Broz committed
2304
		goto bad_disk;
Linus Torvalds's avatar
Linus Torvalds committed
2305

2306 2307
	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
2308
	init_waitqueue_head(&md->wait);
2309
	INIT_WORK(&md->work, dm_wq_work);
2310
	init_waitqueue_head(&md->eventq);
2311
	init_completion(&md->kobj_holder.completion);
2312
	md->kworker_task = NULL;
2313

Linus Torvalds's avatar
Linus Torvalds committed
2314 2315 2316 2317 2318 2319 2320
	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
Mike Anderson's avatar
Mike Anderson committed
2321
	format_dev_t(md->name, MKDEV(_major, minor));
Linus Torvalds's avatar
Linus Torvalds committed
2322

Tejun Heo's avatar
Tejun Heo committed
2323
	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
2324 2325 2326
	if (!md->wq)
		goto bad_thread;

2327 2328 2329 2330
	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad_bdev;

2331 2332 2333 2334
	bio_init(&md->flush_bio);
	md->flush_bio.bi_bdev = md->bdev;
	md->flush_bio.bi_rw = WRITE_FLUSH;

Mikulas Patocka's avatar
Mikulas Patocka committed
2335 2336
	dm_stats_init(&md->stats);

2337
	/* Populate the mapping, nobody knows we exist yet */
2338
	spin_lock(&_minor_lock);
2339
	old_md = idr_replace(&_minor_idr, md, minor);
2340
	spin_unlock(&_minor_lock);
2341 2342 2343

	BUG_ON(old_md != MINOR_ALLOCED);

Linus Torvalds's avatar
Linus Torvalds committed
2344 2345
	return md;

2346 2347
bad_bdev:
	destroy_workqueue(md->wq);
2348
bad_thread:
2349
	del_gendisk(md->disk);
2350
	put_disk(md->disk);
Milan Broz's avatar
Milan Broz committed
2351
bad_disk:
2352
	blk_cleanup_queue(md->queue);
Milan Broz's avatar
Milan Broz committed
2353
bad_queue:
2354 2355
	cleanup_srcu_struct(&md->io_barrier);
bad_io_barrier:
Linus Torvalds's avatar
Linus Torvalds committed
2356
	free_minor(minor);
Milan Broz's avatar
Milan Broz committed
2357
bad_minor:
2358
	module_put(THIS_MODULE);
Milan Broz's avatar
Milan Broz committed
2359
bad_module_get:
Linus Torvalds's avatar
Linus Torvalds committed
2360 2361 2362 2363
	kfree(md);
	return NULL;
}

Jun'ichi Nomura's avatar
Jun'ichi Nomura committed
2364 2365
static void unlock_fs(struct mapped_device *md);

Linus Torvalds's avatar
Linus Torvalds committed
2366 2367
static void free_dev(struct mapped_device *md)
{
2368
	int minor = MINOR(disk_devt(md->disk));
2369

2370
	unlock_fs(md);
2371
	destroy_workqueue(md->wq);
2372 2373 2374

	if (md->kworker_task)
		kthread_stop(md->kworker_task);
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
2375 2376
	if (md->io_pool)
		mempool_destroy(md->io_pool);
2377 2378
	if (md->rq_pool)
		mempool_destroy(md->rq_pool);
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
2379 2380
	if (md->bs)
		bioset_free(md->bs);
2381

2382
	cleanup_srcu_struct(&md->io_barrier);
2383
	free_table_devices(&md->table_devices);
2384
	dm_stats_cleanup(&md->stats);
Jeff Mahoney's avatar
Jeff Mahoney committed
2385 2386 2387 2388

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);
2389 2390 2391
	if (blk_get_integrity(md->disk))
		blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
Linus Torvalds's avatar
Linus Torvalds committed
2392
	put_disk(md->disk);
2393
	blk_cleanup_queue(md->queue);
2394
	if (md->use_blk_mq)
2395
		blk_mq_free_tag_set(&md->tag_set);
2396 2397 2398
	bdput(md->bdev);
	free_minor(minor);

2399
	module_put(THIS_MODULE);
Linus Torvalds's avatar
Linus Torvalds committed
2400 2401 2402
	kfree(md);
}

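/*
 * Take over the mempools/bioset that were allocated for the new table.
 * An already-bound bioset is only rebuilt for bio-based tables, where
 * front_pad may have changed; see the comments below for the
 * request-based case.
 */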
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
2403 2404
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
Mikulas Patocka's avatar
Mikulas Patocka committed
2405
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
2406

2407
	if (md->bs) {
2408 2409 2410 2411 2412 2413 2414 2415 2416 2417
		/* The md already has necessary mempools. */
		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
			/*
			 * Reload bioset because front_pad may have changed
			 * because a different table was loaded.
			 */
			bioset_free(md->bs);
			md->bs = p->bs;
			p->bs = NULL;
		}
2418 2419 2420 2421 2422 2423 2424 2425
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: If you are to reload bioset,
		 * prep-ed requests in the queue may refer
		 * to bio from the old bioset, so you must walk
		 * through the queue to unprep.
		 */
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
2426
		goto out;
Mikulas Patocka's avatar
Mikulas Patocka committed
2427
	}
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
2428

2429
	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
2430 2431 2432

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
2433 2434
	md->rq_pool = p->rq_pool;
	p->rq_pool = NULL;
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
2435 2436 2437 2438
	md->bs = p->bs;
	p->bs = NULL;

out:
2439
	/* mempool bind completed, no longer need any mempools in the table */
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
2440 2441 2442
	dm_table_free_md_mempools(t);
}

Linus Torvalds's avatar
Linus Torvalds committed
2443 2444 2445 2446 2447
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
Mike Anderson's avatar
Mike Anderson committed
2448 2449
	unsigned long flags;
	LIST_HEAD(uevents);
Linus Torvalds's avatar
Linus Torvalds committed
2450 2451
	struct mapped_device *md = (struct mapped_device *) context;

Mike Anderson's avatar
Mike Anderson committed
2452 2453 2454 2455
	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

2456
	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
Mike Anderson's avatar
Mike Anderson committed
2457

Linus Torvalds's avatar
Linus Torvalds committed
2458 2459 2460 2461
	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

2462 2463 2464
/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
2465
static void __set_size(struct mapped_device *md, sector_t size)
Linus Torvalds's avatar
Linus Torvalds committed
2466
{
2467
	set_capacity(md->disk, size);
Linus Torvalds's avatar
Linus Torvalds committed
2468

2469
	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
Linus Torvalds's avatar
Linus Torvalds committed
2470 2471
}

2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485
/*
 * Return 1 if the queue has a compulsory merge_bvec_fn function.
 *
 * If this function returns 0, then the device is either a non-dm
 * device without a merge_bvec_fn, or it is a dm device that is
 * able to split any bios it receives that are too big.
 */
int dm_queue_merge_is_compulsory(struct request_queue *q)
{
	struct mapped_device *dev_md;

	if (!q->merge_bvec_fn)
		return 0;

2486
	if (q->make_request_fn == dm_make_request) {
2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524
		dev_md = q->queuedata;
		if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
			return 0;
	}

	return 1;
}

static int dm_device_merge_is_compulsory(struct dm_target *ti,
					 struct dm_dev *dev, sector_t start,
					 sector_t len, void *data)
{
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	return dm_queue_merge_is_compulsory(q);
}

/*
 * Return 1 if it is acceptable to ignore merge_bvec_fn based
 * on the properties of the underlying devices.
 */
static int dm_table_merge_is_optional(struct dm_table *table)
{
	unsigned i = 0;
	struct dm_target *ti;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
			return 0;
	}

	return 1;
}

2525 2526 2527 2528 2529
/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
Linus Torvalds's avatar
Linus Torvalds committed
2530
{
2531
	struct dm_table *old_map;
2532
	struct request_queue *q = md->queue;
Linus Torvalds's avatar
Linus Torvalds committed
2533
	sector_t size;
2534
	int merge_is_optional;
Linus Torvalds's avatar
Linus Torvalds committed
2535 2536

	size = dm_table_get_size(t);
Darrick J. Wong's avatar
Darrick J. Wong committed
2537 2538 2539 2540

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
Mikulas Patocka's avatar
Mikulas Patocka committed
2541
	if (size != dm_get_size(md))
Darrick J. Wong's avatar
Darrick J. Wong committed
2542 2543
		memset(&md->geometry, 0, sizeof(md->geometry));

2544
	__set_size(md, size);
2545

2546 2547
	dm_table_event_callback(t, event_callback, md);

Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
2548 2549 2550 2551 2552 2553 2554
	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
2555
	if (dm_table_request_based(t))
Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
2556 2557 2558 2559
		stop_queue(q);

	__bind_mempools(md, t);

2560 2561
	merge_is_optional = dm_table_merge_is_optional(t);

2562
	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2563
	rcu_assign_pointer(md->map, t);
2564 2565
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

2566
	dm_table_set_restrictions(t, q, limits);
2567 2568 2569 2570
	if (merge_is_optional)
		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	else
		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2571 2572
	if (old_map)
		dm_sync_table(md);
Linus Torvalds's avatar
Linus Torvalds committed
2573

2574
	return old_map;
Linus Torvalds's avatar
Linus Torvalds committed
2575 2576
}

2577 2578 2579 2580
/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
Linus Torvalds's avatar
Linus Torvalds committed
2581
{
2582
	struct dm_table *map = rcu_dereference_protected(md->map, 1);
Linus Torvalds's avatar
Linus Torvalds committed
2583 2584

	if (!map)
2585
		return NULL;
Linus Torvalds's avatar
Linus Torvalds committed
2586 2587

	dm_table_event_callback(map, NULL, NULL);
2588
	RCU_INIT_POINTER(md->map, NULL);
2589
	dm_sync_table(md);
2590 2591

	return map;
Linus Torvalds's avatar
Linus Torvalds committed
2592 2593 2594 2595 2596
}

/*
 * Constructor for a new device.
 */
2597
int dm_create(int minor, struct mapped_device **result)
Linus Torvalds's avatar
Linus Torvalds committed
2598 2599 2600
{
	struct mapped_device *md;

2601
	md = alloc_dev(minor);
Linus Torvalds's avatar
Linus Torvalds committed
2602 2603 2604
	if (!md)
		return -ENXIO;

Milan Broz's avatar
Milan Broz committed
2605 2606
	dm_sysfs_init(md);

Linus Torvalds's avatar
Linus Torvalds committed
2607 2608 2609 2610
	*result = md;
	return 0;
}

2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626
/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, unsigned type)
{
2627
	BUG_ON(!mutex_is_locked(&md->type_lock));
2628 2629 2630 2631 2632
	md->type = type;
}

unsigned dm_get_md_type(struct mapped_device *md)
{
2633
	BUG_ON(!mutex_is_locked(&md->type_lock));
2634 2635 2636
	return md->type;
}

2637 2638 2639 2640 2641
struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652
/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

2653 2654 2655 2656 2657 2658 2659 2660
static void init_rq_based_worker_thread(struct mapped_device *md)
{
	/* Initialize the request-based DM worker thread */
	init_kthread_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
}

2661 2662 2663 2664 2665 2666 2667 2668 2669 2670
/*
 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
 */
static int dm_init_request_based_queue(struct mapped_device *md)
{
	struct request_queue *q = NULL;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)
2671
		return -EINVAL;
2672

2673 2674 2675
	/* disable dm_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

2676
	md->queue = q;
2677
	dm_init_old_md_queue(md);
2678 2679 2680
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);

2681
	init_rq_based_worker_thread(md);
2682

2683 2684
	elv_register_queue(md->queue);

2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740
	return 0;
}

static int dm_mq_init_request(void *data, struct request *rq,
			      unsigned int hctx_idx, unsigned int request_idx,
			      unsigned int numa_node)
{
	struct mapped_device *md = data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	return 0;
}

static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	int srcu_idx;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	struct dm_target *ti;
	sector_t pos;

	/* always use block 0 to find the target for flushes for now */
	pos = 0;
	if (!(rq->cmd_flags & REQ_FLUSH))
		pos = blk_rq_pos(rq);

	ti = dm_table_find_target(map, pos);
	if (!dm_target_is_valid(ti)) {
		dm_put_live_table(md, srcu_idx);
		DMERR_LIMIT("request attempted access beyond the end of device");
		/*
		 * Must perform setup, that rq_completed() requires,
		 * before returning BLK_MQ_RQ_QUEUE_ERROR
		 */
		dm_start_request(md, rq);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}
	dm_put_live_table(md, srcu_idx);

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_MQ_RQ_QUEUE_BUSY;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

2741 2742 2743 2744
	/*
	 * Establish tio->ti before queuing work (map_tio_request)
	 * or making direct call to map_request().
	 */
2745
	tio->ti = ti;
2746 2747 2748 2749 2750

	/* Clone the request if underlying devices aren't blk-mq */
	if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
		/* clone request is allocated at the end of the pdu */
		tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
2751
		(void) clone_rq(rq, md, tio, GFP_ATOMIC);
2752 2753 2754
		queue_kthread_work(&md->kworker, &tio->work);
	} else {
		/* Direct call is fine since .queue_rq allows allocations */
2755 2756 2757 2758 2759
		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
			/* Undo dm_start_request() before requeuing */
			rq_completed(md, rq_data_dir(rq), false);
			return BLK_MQ_RQ_QUEUE_BUSY;
		}
2760
	}
2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773

	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.map_queue = blk_mq_map_queue,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
{
2774
	unsigned md_type = dm_get_md_type(md);
2775 2776 2777 2778 2779 2780 2781 2782 2783
	struct request_queue *q;
	int err;

	memset(&md->tag_set, 0, sizeof(md->tag_set));
	md->tag_set.ops = &dm_mq_ops;
	md->tag_set.queue_depth = BLKDEV_MAX_RQ;
	md->tag_set.numa_node = NUMA_NO_NODE;
	md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set.nr_hw_queues = 1;
2784 2785 2786 2787 2788
	if (md_type == DM_TYPE_REQUEST_BASED) {
		/* make the memory for non-blk-mq clone part of the pdu */
		md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request);
	} else
		md->tag_set.cmd_size = sizeof(struct dm_rq_target_io);
2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805
	md->tag_set.driver_data = md;

	err = blk_mq_alloc_tag_set(&md->tag_set);
	if (err)
		return err;

	q = blk_mq_init_allocated_queue(&md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	md->queue = q;
	dm_init_md_queue(md);

	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	blk_mq_register_disk(md->disk);

2806 2807
	if (md_type == DM_TYPE_REQUEST_BASED)
		init_rq_based_worker_thread(md);
2808 2809 2810 2811 2812 2813

	return 0;

out_tag_set:
	blk_mq_free_tag_set(&md->tag_set);
	return err;
2814 2815
}

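/*
 * Map the requested table type onto the queue type this md will actually
 * use: bio-based is returned unchanged, request-based is promoted to
 * DM_TYPE_MQ_REQUEST_BASED when md->use_blk_mq is set.
 */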
static unsigned filter_md_type(unsigned type, struct mapped_device *md)
{
	if (type == DM_TYPE_BIO_BASED)
		return type;

	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
}

/*
 * Set up the DM device's queue based on md's type.
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	int r;
	unsigned md_type = filter_md_type(dm_get_md_type(md), md);

	switch (md_type) {
	case DM_TYPE_REQUEST_BASED:
		r = dm_init_request_based_queue(md);
		if (r) {
			DMWARN("Cannot initialize queue for request-based mapped device");
			return r;
		}
		break;
	case DM_TYPE_MQ_REQUEST_BASED:
		r = dm_init_request_based_blk_mq_queue(md);
		if (r) {
			DMWARN("Cannot initialize queue for request-based blk-mq mapped device");
			return r;
		}
		break;
	case DM_TYPE_BIO_BASED:
		dm_init_old_md_queue(md);
		blk_queue_make_request(md->queue, dm_make_request);
		blk_queue_merge_bvec(md->queue, dm_merge_bvec);
		break;
	}

	return 0;
}

2857
struct mapped_device *dm_get_md(dev_t dev)
Linus Torvalds's avatar
Linus Torvalds committed
2858 2859 2860 2861 2862 2863 2864
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

2865
	spin_lock(&_minor_lock);
Linus Torvalds's avatar
Linus Torvalds committed
2866 2867

	md = idr_find(&_minor_idr, minor);
2868 2869 2870 2871 2872 2873 2874 2875 2876
	if (md) {
		if ((md == MINOR_ALLOCED ||
		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
		     dm_deleting_md(md) ||
		     test_bit(DMF_FREEING, &md->flags))) {
			md = NULL;
			goto out;
		}
		dm_get(md);
Jeff Mahoney's avatar
Jeff Mahoney committed
2877
	}
Linus Torvalds's avatar
Linus Torvalds committed
2878

Jeff Mahoney's avatar
Jeff Mahoney committed
2879
out:
2880
	spin_unlock(&_minor_lock);
Linus Torvalds's avatar
Linus Torvalds committed
2881

2882 2883
	return md;
}
Alasdair G Kergon's avatar
Alasdair G Kergon committed
2884
EXPORT_SYMBOL_GPL(dm_get_md);
2885

Alasdair G Kergon's avatar
Alasdair G Kergon committed
2886
void *dm_get_mdptr(struct mapped_device *md)
2887
{
Alasdair G Kergon's avatar
Alasdair G Kergon committed
2888
	return md->interface_ptr;
Linus Torvalds's avatar
Linus Torvalds committed
2889 2890 2891 2892 2893 2894 2895 2896 2897 2898
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
2899
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
Linus Torvalds's avatar
Linus Torvalds committed
2900 2901
}

2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914
int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);

2915 2916 2917 2918 2919 2920
const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

2921
static void __dm_destroy(struct mapped_device *md, bool wait)
Linus Torvalds's avatar
Linus Torvalds committed
2922
{
Mike Anderson's avatar
Mike Anderson committed
2923
	struct dm_table *map;
2924
	int srcu_idx;
Linus Torvalds's avatar
Linus Torvalds committed
2925

2926
	might_sleep();
Jeff Mahoney's avatar
Jeff Mahoney committed
2927

2928
	spin_lock(&_minor_lock);
2929 2930 2931 2932
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

2933
	if (dm_request_based(md) && md->kworker_task)
2934 2935
		flush_kthread_worker(&md->kworker);

2936 2937 2938 2939 2940
	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
2941
	map = dm_get_live_table(md, &srcu_idx);
2942 2943 2944
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
Linus Torvalds's avatar
Linus Torvalds committed
2945
	}
2946 2947
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
2948
	mutex_unlock(&md->suspend_lock);
2949

2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980
	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
Linus Torvalds's avatar
Linus Torvalds committed
2981
}
Edward Goggin's avatar
Edward Goggin committed
2982
EXPORT_SYMBOL_GPL(dm_put);
Linus Torvalds's avatar
Linus Torvalds committed
2983

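/*
 * Wait until no I/O is in flight on the device.  With TASK_INTERRUPTIBLE
 * a pending signal aborts the wait and -EINTR is returned; with
 * TASK_UNINTERRUPTIBLE the wait cannot be interrupted.
 */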
2984
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2985 2986
{
	int r = 0;
2987 2988 2989
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&md->wait, &wait);
2990 2991

	while (1) {
2992
		set_current_state(interruptible);
2993

2994
		if (!md_in_flight(md))
2995 2996
			break;

2997 2998
		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
2999 3000 3001 3002 3003 3004 3005 3006
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

3007 3008
	remove_wait_queue(&md->wait, &wait);

3009 3010 3011
	return r;
}

Linus Torvalds's avatar
Linus Torvalds committed
3012 3013 3014
/*
 * Process the deferred bios
 */
3015
static void dm_wq_work(struct work_struct *work)
Linus Torvalds's avatar
Linus Torvalds committed
3016
{
3017 3018
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
3019
	struct bio *c;
3020 3021
	int srcu_idx;
	struct dm_table *map;
Linus Torvalds's avatar
Linus Torvalds committed
3022

3023
	map = dm_get_live_table(md, &srcu_idx);
3024

3025
	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
Alasdair G Kergon's avatar
Alasdair G Kergon committed
3026 3027 3028 3029
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

3030
		if (!c)
Alasdair G Kergon's avatar
Alasdair G Kergon committed
3031
			break;
3032

Kiyoshi Ueda's avatar
Kiyoshi Ueda committed
3033 3034
		if (dm_request_based(md))
			generic_make_request(c);
3035
		else
3036
			__split_and_process_bio(md, map, c);
3037
	}
Milan Broz's avatar
Milan Broz committed
3038

3039
	dm_put_live_table(md, srcu_idx);
Linus Torvalds's avatar
Linus Torvalds committed
3040 3041
}

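/*
 * Allow bio submission again and kick md->wq so that dm_wq_work()
 * resubmits anything that was deferred while I/O was blocked for suspend.
 */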
3042
static void dm_queue_flush(struct mapped_device *md)
3043
{
3044
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3045
	smp_mb__after_atomic();
3046
	queue_work(md->wq, &md->work);
3047 3048
}

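/*
 * Illustrative caller sequence (the ioctl layer does roughly this when a
 * new table is loaded and the device is resumed):
 *
 *	dm_suspend(md, suspend_flags);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR(old_map))
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 *
 * dm_swap_table() below refuses to run unless the device is suspended.
 */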
Linus Torvalds's avatar
Linus Torvalds committed
3049
/*
3050
 * Swap in a new table, returning the old one for the caller to destroy.
Linus Torvalds's avatar
Linus Torvalds committed
3051
 */
3052
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
Linus Torvalds's avatar
Linus Torvalds committed
3053
{
3054
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
3055
	struct queue_limits limits;
3056
	int r;
Linus Torvalds's avatar
Linus Torvalds committed
3057

3058
	mutex_lock(&md->suspend_lock);
Linus Torvalds's avatar
Linus Torvalds committed
3059 3060

	/* device must be suspended */
3061
	if (!dm_suspended_md(md))
3062
		goto out;
Linus Torvalds's avatar
Linus Torvalds committed
3063

3064 3065 3066 3067 3068 3069 3070
	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
3071
		live_map = dm_get_live_table_fast(md);
3072 3073
		if (live_map)
			limits = md->queue->limits;
3074
		dm_put_live_table_fast(md);
3075 3076
	}

3077 3078 3079 3080 3081 3082
	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
3083
	}
3084

3085
	map = __bind(md, table, &limits);
Linus Torvalds's avatar
Linus Torvalds committed
3086

3087
out:
3088
	mutex_unlock(&md->suspend_lock);
3089
	return map;
Linus Torvalds's avatar
Linus Torvalds committed
3090 3091 3092 3093 3094 3095
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
3096
static int lock_fs(struct mapped_device *md)
Linus Torvalds's avatar
Linus Torvalds committed
3097
{
3098
	int r;
Linus Torvalds's avatar
Linus Torvalds committed
3099 3100

	WARN_ON(md->frozen_sb);
3101

3102
	md->frozen_sb = freeze_bdev(md->bdev);
3103
	if (IS_ERR(md->frozen_sb)) {
3104
		r = PTR_ERR(md->frozen_sb);
3105 3106
		md->frozen_sb = NULL;
		return r;
3107 3108
	}

3109 3110
	set_bit(DMF_FROZEN, &md->flags);

Linus Torvalds's avatar
Linus Torvalds committed
3111 3112 3113
	return 0;
}

3114
static void unlock_fs(struct mapped_device *md)
Linus Torvalds's avatar
Linus Torvalds committed
3115
{
3116 3117 3118
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

3119
	thaw_bdev(md->bdev, md->frozen_sb);
Linus Torvalds's avatar
Linus Torvalds committed
3120
	md->frozen_sb = NULL;
3121
	clear_bit(DMF_FROZEN, &md->flags);
Linus Torvalds's avatar
Linus Torvalds committed
3122 3123 3124
}

/*
3125 3126 3127
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
3128
 *
3129
 * Caller must hold md->suspend_lock
3130
 */
3131 3132
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, int interruptible)
Linus Torvalds's avatar
Linus Torvalds committed
3133
{
3134 3135 3136
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;
Linus Torvalds's avatar
Linus Torvalds committed
3137

3138 3139 3140 3141 3142 3143 3144
	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

3145 3146 3147 3148
	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
3149 3150
	dm_table_presuspend_targets(map);

3151
	/*
3152 3153 3154 3155
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
3156 3157 3158
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
3159 3160
		if (r) {
			dm_table_presuspend_undo_targets(map);
3161
			return r;
3162
		}
3163
	}
Linus Torvalds's avatar
Linus Torvalds committed
3164 3165

	/*
3166 3167 3168 3169 3170 3171 3172
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
3173 3174 3175
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
Linus Torvalds's avatar
Linus Torvalds committed
3176
	 */
3177
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3178 3179
	if (map)
		synchronize_srcu(&md->io_barrier);
Linus Torvalds's avatar
Linus Torvalds committed
3180

3181
	/*
3182 3183
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
3184
	 */
3185
	if (dm_request_based(md)) {
3186
		stop_queue(md->queue);
3187 3188
		if (md->kworker_task)
			flush_kthread_worker(&md->kworker);
3189
	}
3190

3191 3192
	flush_workqueue(md->wq);

Linus Torvalds's avatar
Linus Torvalds committed
3193
	/*
3194 3195 3196
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
Linus Torvalds's avatar
Linus Torvalds committed
3197
	 */
3198
	r = dm_wait_for_completion(md, interruptible);
Linus Torvalds's avatar
Linus Torvalds committed
3199

3200
	if (noflush)
3201
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
3202 3203
	if (map)
		synchronize_srcu(&md->io_barrier);
3204

Linus Torvalds's avatar
Linus Torvalds committed
3205
	/* were we interrupted ? */
3206
	if (r < 0) {
3207
		dm_queue_flush(md);
Milan Broz's avatar
Milan Broz committed
3208

3209
		if (dm_request_based(md))
3210
			start_queue(md->queue);
3211

3212
		unlock_fs(md);
3213
		dm_table_presuspend_undo_targets(map);
3214
		/* pushback list is already flushed, so skip flush */
3215
	}
Linus Torvalds's avatar
Linus Torvalds committed
3216

3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257
	return r;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

3258
	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3259 3260 3261 3262

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
	if (r)
		goto out_unlock;
3263

3264
	set_bit(DMF_SUSPENDED, &md->flags);
3265

3266 3267
	dm_table_postsuspend_targets(map);

3268
out_unlock:
3269
	mutex_unlock(&md->suspend_lock);
3270
	return r;
Linus Torvalds's avatar
Linus Torvalds committed
3271 3272
}

3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

Linus Torvalds's avatar
Linus Torvalds committed
3296 3297
int dm_resume(struct mapped_device *md)
{
3298 3299
	int r = -EINVAL;
	struct dm_table *map = NULL;
Linus Torvalds's avatar
Linus Torvalds committed
3300

3301 3302 3303
retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

3304
	if (!dm_suspended_md(md))
3305 3306
		goto out;

3307 3308 3309 3310 3311 3312 3313 3314 3315
	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

3316
	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3317
	if (!map || !dm_table_get_size(map))
3318
		goto out;
Linus Torvalds's avatar
Linus Torvalds committed
3319

3320
	r = __dm_resume(md, map);
3321 3322
	if (r)
		goto out;
3323 3324 3325

	clear_bit(DMF_SUSPENDED, &md->flags);

3326 3327
	r = 0;
out:
3328
	mutex_unlock(&md->suspend_lock);
3329

3330
	return r;
Linus Torvalds's avatar
Linus Torvalds committed
3331 3332
}

Mikulas Patocka's avatar
Mikulas Patocka committed
3333 3334 3335 3336 3337 3338
/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

3339
static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
Mikulas Patocka's avatar
Mikulas Patocka committed
3340
{
3341 3342
	struct dm_table *map = NULL;

3343
	if (md->internal_suspend_count++)
3344 3345 3346 3347 3348 3349 3350
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

3351
	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);

	set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);

	dm_table_postsuspend_targets(map);
}

static void __dm_internal_resume(struct mapped_device *md)
{
3368 3369 3370
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

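/*
 * Public entry points for internal (in-kernel) suspend/resume.  These take
 * md->suspend_lock themselves and nest via md->internal_suspend_count.
 */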
void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 *
 * dm_internal_suspend_fast() acquires md->suspend_lock and leaves it held;
 * dm_internal_resume_fast() releases it, so the two must always be paired.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
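/*
 * Emit a uevent for the mapped device's gendisk, adding a DM_COOKIE=<value>
 * environment variable when a non-zero cookie was supplied.
 */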
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		       unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

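/* Hand out the next uevent sequence number for this device. */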
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

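/*
 * Sleep (interruptibly) until the device's event counter differs from
 * event_nr; returns -ERESTARTSYS if interrupted by a signal.
 */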
int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

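/*
 * Queue a prepared uevent on md->uevent_list under md->uevent_lock; the
 * list is drained later when the events are actually sent to userspace.
 */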
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

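/*
 * Map an embedded kobject back to its mapped_device and take a reference,
 * unless the device is already being freed or deleted.
 */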
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}

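/*
 * DMF_* flag test helpers: suspended, internally suspended and
 * deferred-remove state of a mapped_device.
 */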
int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

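/*
 * Allocate the per-device mempools and bioset, sized by device type:
 * bio-based devices get an io mempool and a front_pad large enough for
 * per_bio_data_size; request-based devices get tio and request mempools;
 * blk-mq devices only need the bioset, whose front_pad holds the clone
 * bio bookkeeping (struct dm_rq_clone_bio_info).
 */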
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
					    unsigned integrity, unsigned per_bio_data_size)
{
	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
	struct kmem_cache *cachep = NULL;
	unsigned int pool_size = 0;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	type = filter_md_type(type, md);

	switch (type) {
	case DM_TYPE_BIO_BASED:
		cachep = _io_cache;
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		break;
	case DM_TYPE_REQUEST_BASED:
		cachep = _rq_tio_cache;
		pool_size = dm_get_reserved_rq_based_ios();
		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
		if (!pools->rq_pool)
			goto out;
		/* fall through to setup remaining rq-based pools */
	case DM_TYPE_MQ_REQUEST_BASED:
		if (!pool_size)
			pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_bio_data_size is not used. See __bind_mempools(). */
		WARN_ON(per_bio_data_size != 0);
		break;
	default:
		BUG();
	}

	if (cachep) {
		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
		if (!pools->io_pool)
			goto out;
	}

	pools->bs = bioset_create_nobvec(pool_size, front_pad);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

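/*
 * Release whatever dm_alloc_md_mempools() managed to set up; safe to call
 * on a partially constructed (or NULL) pools structure.
 */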
void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->rq_pool)
		mempool_destroy(pools->rq_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}

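/* Block device operations for mapped devices. */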
static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");