Commit 527d1529 authored by Linus Torvalds

Merge branch 'for-4.4/integrity' of git://git.kernel.dk/linux-block

Pull block integrity updates from Jens Axboe:
 ""This is the joint work of Dan and Martin, cleaning up and improving
  the support for block data integrity"

* 'for-4.4/integrity' of git://git.kernel.dk/linux-block:
  block, libnvdimm, nvme: provide a built-in blk_integrity nop profile
  block: blk_flush_integrity() for bio-based drivers
  block: move blk_integrity to request_queue
  block: generic request_queue reference counting
  nvme: suspend i/o during runtime blk_integrity_unregister
  md: suspend i/o during runtime blk_integrity_unregister
  md, dm, scsi, nvme, libnvdimm: drop blk_integrity_unregister() at shutdown
  block: Inline blk_integrity in struct gendisk
  block: Export integrity data interval size in sysfs
  block: Reduce the size of struct blk_integrity
  block: Consolidate static integrity profile properties
  block: Move integrity kobject to struct gendisk
parents effa04cc 4125a09b
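
Taken together, the series moves the blk_integrity descriptor out of its own kmem_cache-backed kobject and embeds it in struct request_queue, splitting the generate/verify callbacks into shared, static struct blk_integrity_profile objects. A minimal sketch of the resulting driver-side registration flow, modeled on the sd_dif.c and t10-pi.c hunks below (mydrv_enable_pi is a hypothetical helper; assumes linux/blkdev.h and linux/t10-pi.h):

    static void mydrv_enable_pi(struct gendisk *disk)
    {
        struct blk_integrity bi;

        memset(&bi, 0, sizeof(bi));
        bi.profile = &t10_pi_type1_crc;        /* shared, static profile */
        bi.tuple_size = sizeof(struct t10_pi_tuple);
        blk_integrity_register(disk, &bi);     /* now returns void */
    }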
@@ -60,6 +60,13 @@ Description:
 		Indicates whether a storage device is capable of storing
 		integrity metadata. Set if the device is T10 PI-capable.
 
+What:		/sys/block/<disk>/integrity/protection_interval_bytes
+Date:		July 2015
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:	Describes the number of data bytes which are protected
+		by one integrity tuple. Typically the device's logical
+		block size.
+
 What:		/sys/block/<disk>/integrity/write_generate
 Date:		June 2008
......
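
The new attribute reads like any other sysfs file; an illustrative userspace fragment (the sda path is an assumption for the example):

    #include <stdio.h>

    int main(void)
    {
        unsigned int bytes = 0;
        FILE *f = fopen("/sys/block/sda/integrity/protection_interval_bytes", "r");

        if (f && fscanf(f, "%u", &bytes) == 1)
            printf("bytes protected per tuple: %u\n", bytes); /* typically 512 or 4096 */
        if (f)
            fclose(f);
        return 0;
    }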
@@ -32,6 +32,11 @@
 static struct kmem_cache *bip_slab;
 static struct workqueue_struct *kintegrityd_wq;
 
+void blk_flush_integrity(void)
+{
+    flush_workqueue(kintegrityd_wq);
+}
+
 /**
  * bio_integrity_alloc - Allocate integrity payload and attach it to bio
  * @bio:	bio to attach integrity metadata to
@@ -177,11 +182,11 @@ bool bio_integrity_enabled(struct bio *bio)
 	if (bi == NULL)
 		return false;
 
-	if (bio_data_dir(bio) == READ && bi->verify_fn != NULL &&
+	if (bio_data_dir(bio) == READ && bi->profile->verify_fn != NULL &&
 	    (bi->flags & BLK_INTEGRITY_VERIFY))
 		return true;
 
-	if (bio_data_dir(bio) == WRITE && bi->generate_fn != NULL &&
+	if (bio_data_dir(bio) == WRITE && bi->profile->generate_fn != NULL &&
 	    (bi->flags & BLK_INTEGRITY_GENERATE))
 		return true;
 
@@ -202,7 +207,7 @@ EXPORT_SYMBOL(bio_integrity_enabled);
 static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
 						   unsigned int sectors)
 {
-	return sectors >> (ilog2(bi->interval) - 9);
+	return sectors >> (bi->interval_exp - 9);
 }
 
 static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
@@ -229,7 +234,7 @@ static int bio_integrity_process(struct bio *bio,
 		bip->bip_vec->bv_offset;
 
 	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
-	iter.interval = bi->interval;
+	iter.interval = 1 << bi->interval_exp;
 	iter.seed = bip_get_seed(bip);
 	iter.prot_buf = prot_buf;
 
@@ -340,7 +345,7 @@ int bio_integrity_prep(struct bio *bio)
 
 	/* Auto-generate integrity metadata if this is a write */
 	if (bio_data_dir(bio) == WRITE)
-		bio_integrity_process(bio, bi->generate_fn);
+		bio_integrity_process(bio, bi->profile->generate_fn);
 
 	return 0;
 }
@@ -361,7 +366,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 	struct bio *bio = bip->bip_bio;
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-	bio->bi_error = bio_integrity_process(bio, bi->verify_fn);
+	bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);
 
 	/* Restore original bio completion handler */
 	bio->bi_end_io = bip->bip_end_io;
......
@@ -554,22 +554,23 @@ void blk_cleanup_queue(struct request_queue *q)
 	 * Drain all requests queued before DYING marking. Set DEAD flag to
 	 * prevent that q->request_fn() gets invoked after draining finished.
 	 */
-	if (q->mq_ops) {
-		blk_mq_freeze_queue(q);
-		spin_lock_irq(lock);
-	} else {
-		spin_lock_irq(lock);
+	blk_freeze_queue(q);
+	spin_lock_irq(lock);
+	if (!q->mq_ops)
 		__blk_drain_queue(q, true);
-	}
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
 
+	/* for synchronous bio-based driver finish in-flight integrity i/o */
+	blk_flush_integrity();
+
 	/* @q won't process any more request, flush async actions */
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
 	blk_sync_queue(q);
 
 	if (q->mq_ops)
 		blk_mq_free_queue(q);
+	percpu_ref_exit(&q->q_usage_counter);
 
 	spin_lock_irq(lock);
 	if (q->queue_lock != &q->__queue_lock)
@@ -629,6 +630,40 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+int blk_queue_enter(struct request_queue *q, gfp_t gfp)
+{
+	while (true) {
+		int ret;
+
+		if (percpu_ref_tryget_live(&q->q_usage_counter))
+			return 0;
+
+		if (!(gfp & __GFP_WAIT))
+			return -EBUSY;
+
+		ret = wait_event_interruptible(q->mq_freeze_wq,
+				!atomic_read(&q->mq_freeze_depth) ||
+				blk_queue_dying(q));
+		if (blk_queue_dying(q))
+			return -ENODEV;
+		if (ret)
+			return ret;
+	}
+}
+
+void blk_queue_exit(struct request_queue *q)
+{
+	percpu_ref_put(&q->q_usage_counter);
+}
+
+static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+{
+	struct request_queue *q =
+		container_of(ref, struct request_queue, q_usage_counter);
+
+	wake_up_all(&q->mq_freeze_wq);
+}
+
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	struct request_queue *q;
@@ -690,11 +725,22 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	init_waitqueue_head(&q->mq_freeze_wq);
 
-	if (blkcg_init_queue(q))
+	/*
+	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
+	 * See blk_register_queue() for details.
+	 */
+	if (percpu_ref_init(&q->q_usage_counter,
+			    blk_queue_usage_counter_release,
+			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
 		goto fail_bdi;
 
+	if (blkcg_init_queue(q))
+		goto fail_ref;
+
 	return q;
 
+fail_ref:
+	percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
 	bdi_destroy(&q->backing_dev_info);
 fail_split:
@@ -1992,9 +2038,19 @@ void generic_make_request(struct bio *bio)
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-		q->make_request_fn(q, bio);
+		if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+			q->make_request_fn(q, bio);
 
-		bio = bio_list_pop(current->bio_list);
+			blk_queue_exit(q);
+
+			bio = bio_list_pop(current->bio_list);
+		} else {
+			struct bio *bio_next = bio_list_pop(current->bio_list);
+
+			bio_io_error(bio);
+			bio = bio_next;
+		}
 	} while (bio);
 
 	current->bio_list = NULL; /* deactivate */
 }
......
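
blk_queue_enter()/blk_queue_exit() give bio-based submission the same reference discipline blk-mq requests already had; a condensed sketch of the caller pattern, mirroring the generic_make_request() hunk above (mydrv_submit is a hypothetical name):

    static int mydrv_submit(struct request_queue *q, struct bio *bio)
    {
        int ret = blk_queue_enter(q, __GFP_WAIT);  /* may sleep */

        if (ret)
            return ret;  /* -EBUSY without __GFP_WAIT, -ENODEV once dying */
        q->make_request_fn(q, bio);
        blk_queue_exit(q);  /* drop the q_usage_counter reference */
        return 0;
    }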
@@ -30,10 +30,6 @@
 
 #include "blk.h"
 
-static struct kmem_cache *integrity_cachep;
-
-static const char *bi_unsupported_name = "unsupported";
-
 /**
  * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
  * @q:		request queue
@@ -146,40 +142,40 @@ EXPORT_SYMBOL(blk_rq_map_integrity_sg);
  */
 int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
 {
-	struct blk_integrity *b1 = gd1->integrity;
-	struct blk_integrity *b2 = gd2->integrity;
+	struct blk_integrity *b1 = &gd1->queue->integrity;
+	struct blk_integrity *b2 = &gd2->queue->integrity;
 
-	if (!b1 && !b2)
+	if (!b1->profile && !b2->profile)
 		return 0;
 
-	if (!b1 || !b2)
+	if (!b1->profile || !b2->profile)
 		return -1;
 
-	if (b1->interval != b2->interval) {
+	if (b1->interval_exp != b2->interval_exp) {
 		pr_err("%s: %s/%s protection interval %u != %u\n",
 		       __func__, gd1->disk_name, gd2->disk_name,
-		       b1->interval, b2->interval);
+		       1 << b1->interval_exp, 1 << b2->interval_exp);
 		return -1;
 	}
 
 	if (b1->tuple_size != b2->tuple_size) {
-		printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__,
+		pr_err("%s: %s/%s tuple sz %u != %u\n", __func__,
 		       gd1->disk_name, gd2->disk_name,
 		       b1->tuple_size, b2->tuple_size);
 		return -1;
 	}
 
 	if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
-		printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__,
+		pr_err("%s: %s/%s tag sz %u != %u\n", __func__,
 		       gd1->disk_name, gd2->disk_name,
 		       b1->tag_size, b2->tag_size);
 		return -1;
 	}
 
-	if (strcmp(b1->name, b2->name)) {
-		printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__,
+	if (b1->profile != b2->profile) {
+		pr_err("%s: %s/%s type %s != %s\n", __func__,
 		       gd1->disk_name, gd2->disk_name,
-		       b1->name, b2->name);
+		       b1->profile->name, b2->profile->name);
 		return -1;
 	}
@@ -249,8 +245,8 @@ struct integrity_sysfs_entry {
 static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr,
 				   char *page)
 {
-	struct blk_integrity *bi =
-		container_of(kobj, struct blk_integrity, kobj);
+	struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
+	struct blk_integrity *bi = &disk->queue->integrity;
 	struct integrity_sysfs_entry *entry =
 		container_of(attr, struct integrity_sysfs_entry, attr);
 
@@ -261,8 +257,8 @@ static ssize_t integrity_attr_store(struct kobject *kobj,
 				    struct attribute *attr, const char *page,
 				    size_t count)
 {
-	struct blk_integrity *bi =
-		container_of(kobj, struct blk_integrity, kobj);
+	struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
+	struct blk_integrity *bi = &disk->queue->integrity;
 	struct integrity_sysfs_entry *entry =
 		container_of(attr, struct integrity_sysfs_entry, attr);
 	ssize_t ret = 0;
@@ -275,18 +271,21 @@ static ssize_t integrity_attr_store(struct kobject *kobj,
 
 static ssize_t integrity_format_show(struct blk_integrity *bi, char *page)
 {
-	if (bi != NULL && bi->name != NULL)
-		return sprintf(page, "%s\n", bi->name);
+	if (bi->profile && bi->profile->name)
+		return sprintf(page, "%s\n", bi->profile->name);
 	else
 		return sprintf(page, "none\n");
 }
 
 static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page)
 {
-	if (bi != NULL)
-		return sprintf(page, "%u\n", bi->tag_size);
-	else
-		return sprintf(page, "0\n");
+	return sprintf(page, "%u\n", bi->tag_size);
+}
+
+static ssize_t integrity_interval_show(struct blk_integrity *bi, char *page)
+{
+	return sprintf(page, "%u\n",
+		       bi->interval_exp ? 1 << bi->interval_exp : 0);
 }
 
 static ssize_t integrity_verify_store(struct blk_integrity *bi,
@@ -343,6 +342,11 @@ static struct integrity_sysfs_entry integrity_tag_size_entry = {
 	.show = integrity_tag_size_show,
 };
 
+static struct integrity_sysfs_entry integrity_interval_entry = {
+	.attr = { .name = "protection_interval_bytes", .mode = S_IRUGO },
+	.show = integrity_interval_show,
+};
+
 static struct integrity_sysfs_entry integrity_verify_entry = {
 	.attr = { .name = "read_verify", .mode = S_IRUGO | S_IWUSR },
 	.show = integrity_verify_show,
@@ -363,6 +367,7 @@ static struct integrity_sysfs_entry integrity_device_entry = {
 static struct attribute *integrity_attrs[] = {
 	&integrity_format_entry.attr,
 	&integrity_tag_size_entry.attr,
+	&integrity_interval_entry.attr,
 	&integrity_verify_entry.attr,
 	&integrity_generate_entry.attr,
 	&integrity_device_entry.attr,
@@ -374,114 +379,89 @@ static const struct sysfs_ops integrity_ops = {
 	.store	= &integrity_attr_store,
 };
 
-static int __init blk_dev_integrity_init(void)
-{
-	integrity_cachep = kmem_cache_create("blkdev_integrity",
-					     sizeof(struct blk_integrity),
-					     0, SLAB_PANIC, NULL);
-	return 0;
-}
-subsys_initcall(blk_dev_integrity_init);
-
-static void blk_integrity_release(struct kobject *kobj)
-{
-	struct blk_integrity *bi =
-		container_of(kobj, struct blk_integrity, kobj);
-
-	kmem_cache_free(integrity_cachep, bi);
-}
-
 static struct kobj_type integrity_ktype = {
 	.default_attrs	= integrity_attrs,
 	.sysfs_ops	= &integrity_ops,
-	.release	= blk_integrity_release,
 };
 
-bool blk_integrity_is_initialized(struct gendisk *disk)
+static int blk_integrity_nop_fn(struct blk_integrity_iter *iter)
 {
-	struct blk_integrity *bi = blk_get_integrity(disk);
-
-	return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0);
+	return 0;
 }
-EXPORT_SYMBOL(blk_integrity_is_initialized);
+
+static struct blk_integrity_profile nop_profile = {
+	.name = "nop",
+	.generate_fn = blk_integrity_nop_fn,
+	.verify_fn = blk_integrity_nop_fn,
+};
 
 /**
  * blk_integrity_register - Register a gendisk as being integrity-capable
  * @disk:	struct gendisk pointer to make integrity-aware
- * @template:	optional integrity profile to register
+ * @template:	block integrity profile to register
  *
- * Description: When a device needs to advertise itself as being able
- * to send/receive integrity metadata it must use this function to
- * register the capability with the block layer. The template is a
- * blk_integrity struct with values appropriate for the underlying
- * hardware. If template is NULL the new profile is allocated but
- * not filled out. See Documentation/block/data-integrity.txt.
+ * Description: When a device needs to advertise itself as being able to
+ * send/receive integrity metadata it must use this function to register
+ * the capability with the block layer. The template is a blk_integrity
+ * struct with values appropriate for the underlying hardware. See
+ * Documentation/block/data-integrity.txt.
  */
-int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
+void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 {
-	struct blk_integrity *bi;
-
-	BUG_ON(disk == NULL);
-
-	if (disk->integrity == NULL) {
-		bi = kmem_cache_alloc(integrity_cachep,
-				      GFP_KERNEL | __GFP_ZERO);
-		if (!bi)
-			return -1;
-
-		if (kobject_init_and_add(&bi->kobj, &integrity_ktype,
-					 &disk_to_dev(disk)->kobj,
-					 "%s", "integrity")) {
-			kmem_cache_free(integrity_cachep, bi);
-			return -1;
-		}
-
-		kobject_uevent(&bi->kobj, KOBJ_ADD);
-
-		bi->flags |= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE;
-		bi->interval = queue_logical_block_size(disk->queue);
-		disk->integrity = bi;
-	} else
-		bi = disk->integrity;
-
-	/* Use the provided profile as template */
-	if (template != NULL) {
-		bi->name = template->name;
-		bi->generate_fn = template->generate_fn;
-		bi->verify_fn = template->verify_fn;
-		bi->tuple_size = template->tuple_size;
-		bi->tag_size = template->tag_size;
-		bi->flags |= template->flags;
-	} else
-		bi->name = bi_unsupported_name;
-
-	disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
-
-	return 0;
+	struct blk_integrity *bi = &disk->queue->integrity;
+
+	bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
+		template->flags;
+	bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
+	bi->profile = template->profile ? template->profile : &nop_profile;
+	bi->tuple_size = template->tuple_size;
+	bi->tag_size = template->tag_size;
+
+	blk_integrity_revalidate(disk);
 }
 EXPORT_SYMBOL(blk_integrity_register);
 
 /**
- * blk_integrity_unregister - Remove block integrity profile
- * @disk:	disk whose integrity profile to deallocate
+ * blk_integrity_unregister - Unregister block integrity profile
+ * @disk:	disk whose integrity profile to unregister
  *
- * Description: This function frees all memory used by the block
- * integrity profile. To be called at device teardown.
+ * Description: This function unregisters the integrity capability from
+ * a block device.
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
-	struct blk_integrity *bi;
-
-	if (!disk || !disk->integrity)
-		return;
-
-	disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
-
-	bi = disk->integrity;
-
-	kobject_uevent(&bi->kobj, KOBJ_REMOVE);
-	kobject_del(&bi->kobj);
-	kobject_put(&bi->kobj);
-	disk->integrity = NULL;
+	blk_integrity_revalidate(disk);
+	memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+}
+EXPORT_SYMBOL(blk_integrity_unregister);
+
+void blk_integrity_revalidate(struct gendisk *disk)
+{
+	struct blk_integrity *bi = &disk->queue->integrity;
+
+	if (!(disk->flags & GENHD_FL_UP))
+		return;
+
+	if (bi->profile)
+		disk->queue->backing_dev_info.capabilities |=
+			BDI_CAP_STABLE_WRITES;
+	else
+		disk->queue->backing_dev_info.capabilities &=
+			~BDI_CAP_STABLE_WRITES;
+}
+
+void blk_integrity_add(struct gendisk *disk)
+{
+	if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
+				 &disk_to_dev(disk)->kobj, "%s", "integrity"))
+		return;
+
+	kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
+}
+
+void blk_integrity_del(struct gendisk *disk)
+{
+	kobject_uevent(&disk->integrity_kobj, KOBJ_REMOVE);
+	kobject_del(&disk->integrity_kobj);
+	kobject_put(&disk->integrity_kobj);
 }
-EXPORT_SYMBOL(blk_integrity_unregister);
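
With the built-in nop profile, drivers that carry opaque metadata but want no generate/verify callbacks (see the libnvdimm and nvme hunks below) simply leave the template's profile NULL; a minimal sketch (meta_size is a driver-supplied value):

    struct blk_integrity bi;

    memset(&bi, 0, sizeof(bi));
    bi.profile = NULL;          /* blk_integrity_register() substitutes nop_profile */
    bi.tuple_size = meta_size;  /* metadata is carried, not generated or verified */
    bi.tag_size = meta_size;
    blk_integrity_register(disk, &bi);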
...@@ -413,12 +413,6 @@ static void blk_mq_sysfs_init(struct request_queue *q) ...@@ -413,12 +413,6 @@ static void blk_mq_sysfs_init(struct request_queue *q)
kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
} }
/* see blk_register_queue() */
void blk_mq_finish_init(struct request_queue *q)
{
percpu_ref_switch_to_percpu(&q->mq_usage_counter);
}
int blk_mq_register_disk(struct gendisk *disk) int blk_mq_register_disk(struct gendisk *disk)
{ {
struct device *dev = disk_to_dev(disk); struct device *dev = disk_to_dev(disk);
......
@@ -78,47 +78,13 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
-{
-	while (true) {
-		int ret;
-
-		if (percpu_ref_tryget_live(&q->mq_usage_counter))
-			return 0;
-
-		if (!(gfp & __GFP_WAIT))
-			return -EBUSY;
-
-		ret = wait_event_interruptible(q->mq_freeze_wq,
-				!atomic_read(&q->mq_freeze_depth) ||
-				blk_queue_dying(q));
-		if (blk_queue_dying(q))
-			return -ENODEV;
-		if (ret)
-			return ret;
-	}
-}
-
-static void blk_mq_queue_exit(struct request_queue *q)
-{
-	percpu_ref_put(&q->mq_usage_counter);
-}
-
-static void blk_mq_usage_counter_release(struct percpu_ref *ref)
-{
-	struct request_queue *q =
-		container_of(ref, struct request_queue, mq_usage_counter);
-
-	wake_up_all(&q->mq_freeze_wq);
-}
-
 void blk_mq_freeze_queue_start(struct request_queue *q)
 {
 	int freeze_depth;
 
 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
 	if (freeze_depth == 1) {
-		percpu_ref_kill(&q->mq_usage_counter);
+		percpu_ref_kill(&q->q_usage_counter);
 		blk_mq_run_hw_queues(q, false);
 	}
 }
@@ -126,18 +92,34 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
-	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
 }
 
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
  */
-void blk_mq_freeze_queue(struct request_queue *q)
+void blk_freeze_queue(struct request_queue *q)
 {
+	/*
+	 * In the !blk_mq case we are only calling this to kill the
+	 * q_usage_counter, otherwise this increases the freeze depth
+	 * and waits for it to return to zero.  For this reason there is
+	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
+	 * exported to drivers as the only user for unfreeze is blk_mq.
+	 */
 	blk_mq_freeze_queue_start(q);
 	blk_mq_freeze_queue_wait(q);
 }
+
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+	/*
+	 * ...just an alias to keep freeze and unfreeze actions balanced
+	 * in the blk_mq_* namespace
+	 */
+	blk_freeze_queue(q);
+}
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -147,7 +129,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
 	WARN_ON_ONCE(freeze_depth < 0);
 	if (!freeze_depth) {
-		percpu_ref_reinit(&q->mq_usage_counter);
+		percpu_ref_reinit(&q->q_usage_counter);
 		wake_up_all(&q->mq_freeze_wq);
 	}
 }
@@ -256,7 +238,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_alloc_data alloc_data;
 	int ret;
 
-	ret = blk_mq_queue_enter(q, gfp);
+	ret = blk_queue_enter(q, gfp);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -279,7 +261,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	}
 	blk_mq_put_ctx(ctx);
 	if (!rq) {
-		blk_mq_queue_exit(q);
+		blk_queue_exit(q);
 		return ERR_PTR(-EWOULDBLOCK);
 	}
 	return rq;
@@ -298,7 +280,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
-	blk_mq_queue_exit(q);
+	blk_queue_exit(q);
 }
 
 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -1185,11 +1167,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	int rw = bio_data_dir(bio);
 	struct blk_mq_alloc_data alloc_data;
 
-	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
-		bio_io_error(bio);
-		return NULL;
-	}
+	blk_queue_enter_live(q);
 
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
@@ -2024,14 +2002,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 		hctxs[i]->queue_num = i;
 	}
 
-	/*
-	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
-	 * See blk_register_queue() for details.
-	 */
-	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
-			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-		goto err_hctxs;
-
 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -2112,8 +2082,6 @@ void blk_mq_free_queue(struct request_queue *q)
 
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 	blk_mq_free_hw_queues(q, set);
-
-	percpu_ref_exit(&q->mq_usage_counter);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
......
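
With q_usage_counter generalized, the freeze/unfreeze pair is the sanctioned bracket for runtime integrity changes, which is how the nvme_revalidate_disk() hunk further down uses it; condensed (disk is an assumption):

    blk_mq_freeze_queue(disk->queue);    /* kill the ref and wait for it to drain */
    /* safe to swap the integrity profile, block size, etc. */
    blk_mq_unfreeze_queue(disk->queue);  /* percpu_ref_reinit() and wake waiters */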
@@ -600,9 +600,8 @@ int blk_register_queue(struct gendisk *disk)
 	 */
 	if (!blk_queue_init_done(q)) {
 		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+		percpu_ref_switch_to_percpu(&q->q_usage_counter);
 		blk_queue_bypass_end(q);
-		if (q->mq_ops)
-			blk_mq_finish_init(q);
 	}
 
 	ret = blk_trace_init_sysfs(dev);
......
@@ -72,6 +72,28 @@ void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
 			    unsigned int nr_bytes, unsigned int bidi_bytes);
+int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+void blk_queue_exit(struct request_queue *q);
+void blk_freeze_queue(struct request_queue *q);
+
+static inline void blk_queue_enter_live(struct request_queue *q)
+{
+	/*
+	 * Given that running in generic_make_request() context
+	 * guarantees that a live reference against q_usage_counter has
+	 * been established, further references under that same context
+	 * need not check that the queue has been frozen (marked dead).
+	 */
+	percpu_ref_get(&q->q_usage_counter);
+}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+void blk_flush_integrity(void);
+#else
+static inline void blk_flush_integrity(void)
+{
+}
+#endif
 
 void blk_rq_timed_out_timer(unsigned long data);
 unsigned long blk_rq_timeout(unsigned long timeout);
......
@@ -630,6 +630,7 @@ void add_disk(struct gendisk *disk)
 	WARN_ON(retval);
 
 	disk_add_events(disk);
+	blk_integrity_add(disk);
 }
 EXPORT_SYMBOL(add_disk);
 
@@ -638,6 +639,7 @@ void del_gendisk(struct gendisk *disk)
 	struct disk_part_iter piter;
 	struct hd_struct *part;
 
+	blk_integrity_del(disk);
 	disk_del_events(disk);
 
 	/* invalidate stuff */
......
@@ -428,6 +428,7 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
 
 	if (disk->fops->revalidate_disk)
 		disk->fops->revalidate_disk(disk);
+	blk_integrity_revalidate(disk);
 	check_disk_size_change(disk, bdev);
 	bdev->bd_invalidated = 0;
 	if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
......
@@ -160,38 +160,30 @@ static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
 	return t10_pi_verify(iter, t10_pi_ip_fn, 3);
 }
 
-struct blk_integrity t10_pi_type1_crc = {
+struct blk_integrity_profile t10_pi_type1_crc = {
 	.name = "T10-DIF-TYPE1-CRC",
 	.generate_fn = t10_pi_type1_generate_crc,
 	.verify_fn = t10_pi_type1_verify_crc,
-	.tuple_size = sizeof(struct t10_pi_tuple),
-	.tag_size = 0,
 };
 EXPORT_SYMBOL(t10_pi_type1_crc);
 
-struct blk_integrity t10_pi_type1_ip = {
+struct blk_integrity_profile t10_pi_type1_ip = {
 	.name = "T10-DIF-TYPE1-IP",
 	.generate_fn = t10_pi_type1_generate_ip,
 	.verify_fn = t10_pi_type1_verify_ip,
-	.tuple_size = sizeof(struct t10_pi_tuple),
-	.tag_size = 0,
 };
 EXPORT_SYMBOL(t10_pi_type1_ip);
 
-struct blk_integrity t10_pi_type3_crc = {
+struct blk_integrity_profile t10_pi_type3_crc = {
 	.name = "T10-DIF-TYPE3-CRC",
 	.generate_fn = t10_pi_type3_generate_crc,
 	.verify_fn = t10_pi_type3_verify_crc,
-	.tuple_size = sizeof(struct t10_pi_tuple),
-	.tag_size = 0,
 };
 EXPORT_SYMBOL(t10_pi_type3_crc);
 
-struct blk_integrity t10_pi_type3_ip = {
+struct blk_integrity_profile t10_pi_type3_ip = {
 	.name = "T10-DIF-TYPE3-IP",
 	.generate_fn = t10_pi_type3_generate_ip,
 	.verify_fn = t10_pi_type3_verify_ip,
-	.tuple_size = sizeof(struct t10_pi_tuple),
-	.tag_size = 0,
 };
 EXPORT_SYMBOL(t10_pi_type3_ip);
@@ -1014,15 +1014,16 @@ static int dm_table_build_index(struct dm_table *t)
 	return r;
 }
 
+static bool integrity_profile_exists(struct gendisk *disk)
+{
+	return !!blk_get_integrity(disk);
+}
+
 /*
  * Get a disk whose integrity profile reflects the table's profile.
- * If %match_all is true, all devices' profiles must match.
- * If %match_all is false, all devices must at least have an
- * allocated integrity profile; but uninitialized is ok.
  * Returns NULL if integrity support was inconsistent or unavailable.
  */
-static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
-						    bool match_all)
+static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
 {
 	struct list_head *devices = dm_table_get_devices(t);
 	struct dm_dev_internal *dd = NULL;
@@ -1030,10 +1031,8 @@ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
 
 	list_for_each_entry(dd, devices, list) {
 		template_disk = dd->dm_dev->bdev->bd_disk;
-		if (!blk_get_integrity(template_disk))
+		if (!integrity_profile_exists(template_disk))
 			goto no_integrity;
-		if (!match_all && !blk_integrity_is_initialized(template_disk))
-			continue; /* skip uninitialized profiles */
 		else if (prev_disk &&
 			 blk_integrity_compare(prev_disk, template_disk) < 0)
 			goto no_integrity;
@@ -1052,34 +1051,40 @@ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
 }
 
 /*
- * Register the mapped device for blk_integrity support if
- * the underlying devices have an integrity profile. But all devices
- * may not have matching profiles (checking all devices isn't reliable
+ * Register the mapped device for blk_integrity support if the
+ * underlying devices have an integrity profile. But all devices may
+ * not have matching profiles (checking all devices isn't reliable
  * during table load because this table may use other DM device(s) which
- * must be resumed before they will have an initialized integrity profile).
- * Stacked DM devices force a 2 stage integrity profile validation:
- * 1 - during load, validate all initialized integrity profiles match
- * 2 - during resume, validate all integrity profiles match
+ * must be resumed before they will have an initialized integrity
+ * profile). Consequently, stacked DM devices force a 2 stage integrity
+ * profile validation: First pass during table load, final pass during
+ * resume.
  */
-static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
+static int dm_table_register_integrity(struct dm_table *t)
 {
+	struct mapped_device *md = t->md;
 	struct gendisk *template_disk = NULL;
 
-	template_disk = dm_table_get_integrity_disk(t, false);
+	template_disk = dm_table_get_integrity_disk(t);
 	if (!template_disk)
 		return 0;
 
-	if (!blk_integrity_is_initialized(dm_disk(md))) {
+	if (!integrity_profile_exists(dm_disk(md))) {
 		t->integrity_supported = 1;
-		return blk_integrity_register(dm_disk(md), NULL);
+		/*
+		 * Register integrity profile during table load; we can do
+		 * this because the final profile must match during resume.
+		 */
+		blk_integrity_register(dm_disk(md),
+				       blk_get_integrity(template_disk));
+		return 0;
 	}
 
 	/*
-	 * If DM device already has an initialized integrity
+	 * If DM device already has an initialized integrity
 	 * profile the new profile should not conflict.
 	 */
-	if (blk_integrity_is_initialized(template_disk) &&
-	    blk_integrity_compare(dm_disk(md), template_disk) < 0) {
+	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
 		DMWARN("%s: conflict with existing integrity profile: "
 		       "%s profile mismatch",
 		       dm_device_name(t->md),
@@ -1087,7 +1092,7 @@ static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device
 		return 1;
 	}
 
-	/* Preserve existing initialized integrity profile */
+	/* Preserve existing integrity profile */
 	t->integrity_supported = 1;
 	return 0;
 }
@@ -1112,7 +1117,7 @@ int dm_table_complete(struct dm_table *t)
 		return r;
 	}
 
-	r = dm_table_prealloc_integrity(t, t->md);
+	r = dm_table_register_integrity(t);
 	if (r) {
 		DMERR("could not register integrity profile.");
 		return r;
@@ -1278,29 +1283,30 @@ int dm_calculate_queue_limits(struct dm_table *table,
 }
 
 /*
- * Set the integrity profile for this device if all devices used have
- * matching profiles.  We're quite deep in the resume path but still
- * don't know if all devices (particularly DM devices this device
- * may be stacked on) have matching profiles.  Even if the profiles
- * don't match we have no way to fail (to resume) at this point.
+ * Verify that all devices have an integrity profile that matches the
+ * DM device's registered integrity profile.  If the profiles don't
+ * match then unregister the DM device's integrity profile.
  */
-static void dm_table_set_integrity(struct dm_table *t)
+static void dm_table_verify_integrity(struct dm_table *t)
 {
 	struct gendisk *template_disk = NULL;
 
-	if (!blk_get_integrity(dm_disk(t->md)))
-		return;
+	if (t->integrity_supported) {
+		/*
+		 * Verify that the original integrity profile
+		 * matches all the devices in this table.
+		 */
+		template_disk = dm_table_get_integrity_disk(t);
+		if (template_disk &&
+		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
+			return;
+	}
 
-	template_disk = dm_table_get_integrity_disk(t, true);
-	if (template_disk)
-		blk_integrity_register(dm_disk(t->md),
-				       blk_get_integrity(template_disk));
-	else if (blk_integrity_is_initialized(dm_disk(t->md)))
-		DMWARN("%s: device no longer has a valid integrity profile",
-		       dm_device_name(t->md));
-	else
+	if (integrity_profile_exists(dm_disk(t->md))) {
 		DMWARN("%s: unable to establish an integrity profile",
 		       dm_device_name(t->md));
+		blk_integrity_unregister(dm_disk(t->md));
+	}
 }
 
 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1500,7 +1506,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
 
-	dm_table_set_integrity(t);
+	dm_table_verify_integrity(t);
 
 	/*
 	 * Determine whether or not this queue's I/O timings contribute
......
@@ -2234,8 +2234,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
 		spin_lock(&_minor_lock);
 		md->disk->private_data = NULL;
 		spin_unlock(&_minor_lock);
-		if (blk_get_integrity(md->disk))
-			blk_integrity_unregister(md->disk);
 		del_gendisk(md->disk);
 		put_disk(md->disk);
 	}
......
@@ -1962,12 +1962,9 @@ int md_integrity_register(struct mddev *mddev)
 	 * All component devices are integrity capable and have matching
 	 * profiles, register the common profile for the md device.
 	 */
-	if (blk_integrity_register(mddev->gendisk,
-			bdev_get_integrity(reference->bdev)) != 0) {
-		printk(KERN_ERR "md: failed to register integrity for %s\n",
-			mdname(mddev));
-		return -EINVAL;
-	}
+	blk_integrity_register(mddev->gendisk,
+			       bdev_get_integrity(reference->bdev));
 	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
 	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
 		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
@@ -1997,6 +1994,7 @@ void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
 					     rdev->bdev->bd_disk) >= 0)
 		return;
+	WARN_ON_ONCE(!mddev->suspended);
 	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
 	blk_integrity_unregister(mddev->gendisk);
 }
@@ -5542,7 +5540,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
 		if (mddev->hold_active == UNTIL_STOP)
 			mddev->hold_active = 0;
 	}
-	blk_integrity_unregister(disk);
 	md_new_event(mddev);
 	sysfs_notify_dirent_safe(mddev->sysfs_state);
 	return 0;
......
@@ -264,7 +264,9 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			spin_unlock_irq(&conf->device_lock);
 			rcu_assign_pointer(p->rdev, rdev);
 			err = 0;
+			mddev_suspend(mddev);
 			md_integrity_add_rdev(rdev, mddev);
+			mddev_resume(mddev);
 			break;
 		}
......
@@ -1621,7 +1621,9 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			break;
 		}
 	}
+	mddev_suspend(mddev);
 	md_integrity_add_rdev(rdev, mddev);
+	mddev_resume(mddev);
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
 	print_conf(conf);
......
@@ -1739,7 +1739,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		rcu_assign_pointer(p->rdev, rdev);
 		break;
 	}
+	mddev_suspend(mddev);
 	md_integrity_add_rdev(rdev, mddev);
+	mddev_resume(mddev);
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
......
@@ -1279,7 +1279,6 @@ static int btt_blk_init(struct btt *btt)
 
 static void btt_blk_cleanup(struct btt *btt)
 {
-	blk_integrity_unregister(btt->btt_disk);
 	del_gendisk(btt->btt_disk);
 	put_disk(btt->btt_disk);
 	blk_cleanup_queue(btt->btt_queue);
......
@@ -392,29 +392,18 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
 EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
-static int nd_pi_nop_generate_verify(struct blk_integrity_iter *iter)
-{
-	return 0;
-}
-
 int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
 {
-	struct blk_integrity integrity = {
-		.name = "ND-PI-NOP",
-		.generate_fn = nd_pi_nop_generate_verify,
-		.verify_fn = nd_pi_nop_generate_verify,
-		.tuple_size = meta_size,
-		.tag_size = meta_size,
-	};
-	int ret;
+	struct blk_integrity bi;
 
 	if (meta_size == 0)
 		return 0;
 
-	ret = blk_integrity_register(disk, &integrity);
-	if (ret)
-		return ret;
+	bi.profile = NULL;
+	bi.tuple_size = meta_size;
+	bi.tag_size = meta_size;
 
+	blk_integrity_register(disk, &bi);
 	blk_queue_max_integrity_segments(disk->queue, 1);
 
 	return 0;
......
@@ -538,7 +538,7 @@ static void nvme_dif_remap(struct request *req,
 	virt = bip_get_seed(bip);
 	phys = nvme_block_nr(ns, blk_rq_pos(req));
 	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
-	ts = ns->disk->integrity->tuple_size;
+	ts = ns->disk->queue->integrity.tuple_size;
 
 	for (i = 0; i < nlb; i++, virt++, phys++) {
 		pi = (struct t10_pi_tuple *)p;
@@ -548,36 +548,20 @@ static void nvme_dif_remap(struct request *req,
 	kunmap_atomic(pmap);
 }
 
-static int nvme_noop_verify(struct blk_integrity_iter *iter)
-{
-	return 0;
-}
-
-static int nvme_noop_generate(struct blk_integrity_iter *iter)
-{
-	return 0;
-}
-
-struct blk_integrity nvme_meta_noop = {
-	.name = "NVME_META_NOOP",
-	.generate_fn = nvme_noop_generate,
-	.verify_fn = nvme_noop_verify,
-};
-
 static void nvme_init_integrity(struct nvme_ns *ns)
 {
 	struct blk_integrity integrity;
 
 	switch (ns->pi_type) {
 	case NVME_NS_DPS_PI_TYPE3:
-		integrity = t10_pi_type3_crc;
+		integrity.profile = &t10_pi_type3_crc;
 		break;
 	case NVME_NS_DPS_PI_TYPE1:
 	case NVME_NS_DPS_PI_TYPE2:
-		integrity = t10_pi_type1_crc;
+		integrity.profile = &t10_pi_type1_crc;
 		break;
 	default:
-		integrity = nvme_meta_noop;
+		integrity.profile = NULL;
 		break;
 	}
 	integrity.tuple_size = ns->ms;
@@ -2052,6 +2036,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
 					id->dps & NVME_NS_DPS_PI_MASK : 0;
 
+	blk_mq_freeze_queue(disk->queue);
 	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
 				ns->ms != old_ms ||
 				bs != queue_logical_block_size(disk->queue) ||
@@ -2061,8 +2046,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	ns->pi_type = pi_type;
 	blk_queue_logical_block_size(ns->queue, bs);
 
-	if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
-								!ns->ext)
+	if (ns->ms && !ns->ext)
 		nvme_init_integrity(ns);
 
 	if ((ns->ms && !(ns->ms == 8 && ns->pi_type) &&
@@ -2074,6 +2058,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	if (dev->oncs & NVME_CTRL_ONCS_DSM)
 		nvme_config_discard(ns);
+	blk_mq_unfreeze_queue(disk->queue);
 
 	kfree(id);
 	return 0;
@@ -2429,11 +2414,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	if (kill)
 		blk_set_queue_dying(ns->queue);
 
-	if (ns->disk->flags & GENHD_FL_UP) {
-		if (blk_get_integrity(ns->disk))
-			blk_integrity_unregister(ns->disk);
+	if (ns->disk->flags & GENHD_FL_UP)
 		del_gendisk(ns->disk);
-	}
 	if (kill || !blk_queue_dying(ns->queue)) {
 		blk_mq_abort_requeue_list(ns->queue);
 		blk_cleanup_queue(ns->queue);
......
@@ -3068,7 +3068,6 @@ static void scsi_disk_release(struct device *dev)
 	ida_remove(&sd_index_ida, sdkp->index);
 	spin_unlock(&sd_index_lock);
 
-	blk_integrity_unregister(disk);
 	disk->private_data = NULL;
 	put_disk(disk);
 	put_device(&sdkp->device->sdev_gendev);
......
@@ -43,6 +43,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
 	struct scsi_device *sdp = sdkp->device;
 	struct gendisk *disk = sdkp->disk;
 	u8 type = sdkp->protection_type;
+	struct blk_integrity bi;
 	int dif, dix;
 
 	dif = scsi_host_dif_capable(sdp->host, type);
@@ -55,39 +56,43 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
 	if (!dix)
 		return;
 
+	memset(&bi, 0, sizeof(bi));
+
 	/* Enable DMA of protection information */
 	if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
 		if (type == SD_DIF_TYPE3_PROTECTION)
-			blk_integrity_register(disk, &t10_pi_type3_ip);
+			bi.profile = &t10_pi_type3_ip;
 		else
-			blk_integrity_register(disk, &t10_pi_type1_ip);
+			bi.profile = &t10_pi_type1_ip;
 
-		disk->integrity->flags |= BLK_INTEGRITY_IP_CHECKSUM;
+		bi.flags |= BLK_INTEGRITY_IP_CHECKSUM;
 	} else
 		if (type == SD_DIF_TYPE3_PROTECTION)
-			blk_integrity_register(disk, &t10_pi_type3_crc);
+			bi.profile = &t10_pi_type3_crc;
 		else
-			blk_integrity_register(disk, &t10_pi_type1_crc);
+			bi.profile = &t10_pi_type1_crc;
 
+	bi.tuple_size = sizeof(struct t10_pi_tuple);
 	sd_printk(KERN_NOTICE, sdkp,
-		  "Enabling DIX %s protection\n", disk->integrity->name);
+		  "Enabling DIX %s protection\n", bi.profile->name);
 
-	/* Signal to block layer that we support sector tagging */
 	if (dif && type) {
-
-		disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+		bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
 
 		if (!sdkp->ATO)
-			return;
+			goto out;
 
 		if (type == SD_DIF_TYPE3_PROTECTION)
-			disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
+			bi.tag_size = sizeof(u16) + sizeof(u32);
 		else
-			disk->integrity->tag_size = sizeof(u16);
+			bi.tag_size = sizeof(u16);
 
 		sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
-			  disk->integrity->tag_size);
+			  bi.tag_size);
 	}
+
+out:
+	blk_integrity_register(disk, &bi);
 }
 
 /*
......
@@ -155,17 +155,17 @@ static int iblock_configure_device(struct se_device *dev)
 	if (bi) {
 		struct bio_set *bs = ib_dev->ibd_bio_set;
 
-		if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
-		    !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
+		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
+		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
 			pr_err("IBLOCK export of blk_integrity: %s not"
-			       " supported\n", bi->name);
+			       " supported\n", bi->profile->name);
 			ret = -ENOSYS;
 			goto out_blkdev_put;
 		}
 
-		if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
+		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
 			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
-		} else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
+		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
 			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
 		}
......
@@ -1075,7 +1075,7 @@ int revalidate_disk(struct gendisk *disk)
 
 	if (disk->fops->revalidate_disk)
 		ret = disk->fops->revalidate_disk(disk);
-
+	blk_integrity_revalidate(disk);
 	bdev = bdget_disk(disk, 0);
 	if (!bdev)
 		return ret;
......
@@ -166,7 +166,6 @@ enum {
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 						  struct request_queue *q);
-void blk_mq_finish_init(struct request_queue *q);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
......
@@ -369,6 +369,10 @@ struct request_queue {
 	 */
 	struct kobject mq_kobj;
 
+#ifdef  CONFIG_BLK_DEV_INTEGRITY
+	struct blk_integrity integrity;
+#endif	/* CONFIG_BLK_DEV_INTEGRITY */
+
 #ifdef CONFIG_PM
 	struct device		*dev;
 	int			rpm_status;
@@ -450,7 +454,7 @@ struct request_queue {
 #endif
 	struct rcu_head		rcu_head;
 	wait_queue_head_t	mq_freeze_wq;
-	struct percpu_ref	mq_usage_counter;
+	struct percpu_ref	q_usage_counter;
 	struct list_head	all_q_node;
 
 	struct blk_mq_tag_set	*tag_set;
@@ -1462,22 +1466,13 @@ struct blk_integrity_iter {
 
 typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
 
-struct blk_integrity {
+struct blk_integrity_profile {
 	integrity_processing_fn	*generate_fn;
 	integrity_processing_fn	*verify_fn;
-	unsigned short		flags;
-	unsigned short		tuple_size;
-	unsigned short		interval;
-	unsigned short		tag_size;
 	const char		*name;
-	struct kobject		kobj;
 };
 
-extern bool blk_integrity_is_initialized(struct gendisk *);
-extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
+extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
@@ -1488,15 +1483,20 @@ extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
 				    struct bio *);
 
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 {
-	return bdev->bd_disk->integrity;
+	struct blk_integrity *bi = &disk->queue->integrity;
+
+	if (!bi->profile)
+		return NULL;
+
+	return bi;
 }
 
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+static inline
+struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
 {
-	return disk->integrity;
+	return blk_get_integrity(bdev->bd_disk);
 }
 
 static inline bool blk_integrity_rq(struct request *rq)
@@ -1570,10 +1570,9 @@ static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
 {
 	return 0;
 }
-static inline int blk_integrity_register(struct gendisk *d,
+static inline void blk_integrity_register(struct gendisk *d,
 					 struct blk_integrity *b)
 {
-	return 0;
 }
 static inline void blk_integrity_unregister(struct gendisk *d)
 {
@@ -1598,10 +1597,7 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq,
 {
 	return true;
 }
-static inline bool blk_integrity_is_initialized(struct gendisk *g)
-{
-	return 0;
-}
+
 static inline bool integrity_req_gap_back_merge(struct request *req,
 						struct bio *next)
 {
......
@@ -163,6 +163,18 @@ struct disk_part_tbl {
 
 struct disk_events;
 
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+struct blk_integrity {
+	struct blk_integrity_profile	*profile;
+	unsigned char			flags;
+	unsigned char			tuple_size;
+	unsigned char			interval_exp;
+	unsigned char			tag_size;
+};
+
+#endif	/* CONFIG_BLK_DEV_INTEGRITY */
+
 struct gendisk {
 	/* major, first_minor and minors are input parameters only,
 	 * don't use directly.  Use disk_devt() and disk_max_parts().
@@ -198,8 +210,8 @@ struct gendisk {
 	atomic_t sync_io;		/* RAID */
 	struct disk_events *ev;
 #ifdef  CONFIG_BLK_DEV_INTEGRITY
-	struct blk_integrity *integrity;
+	struct kobject integrity_kobj;
-#endif
+#endif	/* CONFIG_BLK_DEV_INTEGRITY */
 	int node_id;
 };
 
@@ -727,6 +739,16 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
 #endif
 }
 
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+extern void blk_integrity_add(struct gendisk *);
+extern void blk_integrity_del(struct gendisk *);
+extern void blk_integrity_revalidate(struct gendisk *);
+#else	/* CONFIG_BLK_DEV_INTEGRITY */
+static inline void blk_integrity_add(struct gendisk *disk) { }
+static inline void blk_integrity_del(struct gendisk *disk) { }
+static inline void blk_integrity_revalidate(struct gendisk *disk) { }
+#endif	/* CONFIG_BLK_DEV_INTEGRITY */
+
 #else /* CONFIG_BLOCK */
 
 static inline void printk_all_partitions(void) { }
......
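
Storing the protection interval as a log2 exponent is what lets struct blk_integrity above shrink to four unsigned chars plus the profile pointer; the hot-path conversions become shifts, exactly as bio_integrity_intervals() shows earlier in the series:

    unsigned int interval_bytes = 1 << bi->interval_exp;        /* bytes per tuple */
    unsigned int intervals = sectors >> (bi->interval_exp - 9); /* tuples per 512-byte sector run */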
@@ -14,9 +14,9 @@ struct t10_pi_tuple {
 };
 
-extern struct blk_integrity t10_pi_type1_crc;
-extern struct blk_integrity t10_pi_type1_ip;
-extern struct blk_integrity t10_pi_type3_crc;
-extern struct blk_integrity t10_pi_type3_ip;
+extern struct blk_integrity_profile t10_pi_type1_crc;
+extern struct blk_integrity_profile t10_pi_type1_ip;
+extern struct blk_integrity_profile t10_pi_type3_crc;
+extern struct blk_integrity_profile t10_pi_type3_ip;
 
 #endif