Commit 060a72a2 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-5.9/block-merge-20200804' of git://git.kernel.dk/linux-block

Pull block stacking updates from Jens Axboe:
 "The stacking related fixes depended on both the core block and drivers
  branches, so here's a topic branch with that change.

  Outside of that, a late fix from Johannes for zone revalidation"

* tag 'for-5.9/block-merge-20200804' of git://git.kernel.dk/linux-block:
  block: don't do revalidate zones on invalid devices
  block: remove blk_queue_stack_limits
  block: remove bdev_stack_limits
  block: inherit the zoned characteristics in blk_stack_limits
parents 22fcffeb 1a1206dc
...@@ -455,17 +455,6 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt) ...@@ -455,17 +455,6 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
} }
EXPORT_SYMBOL(blk_queue_io_opt); EXPORT_SYMBOL(blk_queue_io_opt);
/**
* blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
* @t: the stacking driver (top)
* @b: the underlying device (bottom)
**/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);
/** /**
* blk_stack_limits - adjust queue_limits for stacked devices * blk_stack_limits - adjust queue_limits for stacked devices
* @t: the stacking driver limits (top device) * @t: the stacking driver limits (top device)
...@@ -609,32 +598,11 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, ...@@ -609,32 +598,11 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->chunk_sectors = min_not_zero(t->chunk_sectors, t->chunk_sectors = min_not_zero(t->chunk_sectors,
b->chunk_sectors); b->chunk_sectors);
t->zoned = max(t->zoned, b->zoned);
return ret; return ret;
} }
EXPORT_SYMBOL(blk_stack_limits); EXPORT_SYMBOL(blk_stack_limits);
/**
* bdev_stack_limits - adjust queue limits for stacked drivers
* @t: the stacking driver limits (top device)
* @bdev: the component block_device (bottom)
* @start: first data sector within component device
*
* Description:
* Merges queue limits for a top device and a block_device. Returns
* 0 if alignment didn't change. Returns -1 if adding the bottom
* device caused misalignment.
*/
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
sector_t start)
{
struct request_queue *bq = bdev_get_queue(bdev);
start += get_start_sect(bdev);
return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);
/** /**
* disk_stack_limits - adjust queue limits for stacked drivers * disk_stack_limits - adjust queue limits for stacked drivers
* @disk: MD/DM gendisk (top) * @disk: MD/DM gendisk (top)
...@@ -650,7 +618,8 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, ...@@ -650,7 +618,8 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
{ {
struct request_queue *t = disk->queue; struct request_queue *t = disk->queue;
if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
get_start_sect(bdev) + (offset >> 9)) < 0) {
char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
disk_name(disk, 0, top); disk_name(disk, 0, top);
......
...@@ -498,6 +498,9 @@ int blk_revalidate_disk_zones(struct gendisk *disk, ...@@ -498,6 +498,9 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
if (WARN_ON_ONCE(!queue_is_mq(q))) if (WARN_ON_ONCE(!queue_is_mq(q)))
return -EIO; return -EIO;
if (!get_capacity(disk))
return -EIO;
/* /*
* Ensure that all memory allocations in this context are done as if * Ensure that all memory allocations in this context are done as if
* GFP_NOIO was specified. * GFP_NOIO was specified.
......
...@@ -1250,7 +1250,7 @@ static void fixup_discard_if_not_supported(struct request_queue *q) ...@@ -1250,7 +1250,7 @@ static void fixup_discard_if_not_supported(struct request_queue *q)
static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q) static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
{ {
/* Fixup max_write_zeroes_sectors after blk_queue_stack_limits(): /* Fixup max_write_zeroes_sectors after blk_stack_limits():
* if we can handle "zeroes" efficiently on the protocol, * if we can handle "zeroes" efficiently on the protocol,
* we want to do that, even if our backend does not announce * we want to do that, even if our backend does not announce
* max_write_zeroes_sectors itself. */ * max_write_zeroes_sectors itself. */
...@@ -1361,7 +1361,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi ...@@ -1361,7 +1361,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
decide_on_write_same_support(device, q, b, o, disable_write_same); decide_on_write_same_support(device, q, b, o, disable_write_same);
if (b) { if (b) {
blk_queue_stack_limits(q, b); blk_stack_limits(&q->limits, &b->limits, 0);
if (q->backing_dev_info->ra_pages != if (q->backing_dev_info->ra_pages !=
b->backing_dev_info->ra_pages) { b->backing_dev_info->ra_pages) {
......
...@@ -458,7 +458,8 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, ...@@ -458,7 +458,8 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
return 0; return 0;
} }
if (bdev_stack_limits(limits, bdev, start) < 0) if (blk_stack_limits(limits, &q->limits,
get_start_sect(bdev) + start) < 0)
DMWARN("%s: adding target device %s caused an alignment inconsistency: " DMWARN("%s: adding target device %s caused an alignment inconsistency: "
"physical_block_size=%u, logical_block_size=%u, " "physical_block_size=%u, logical_block_size=%u, "
"alignment_offset=%u, start=%llu", "alignment_offset=%u, start=%llu",
...@@ -467,9 +468,6 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, ...@@ -467,9 +468,6 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
q->limits.logical_block_size, q->limits.logical_block_size,
q->limits.alignment_offset, q->limits.alignment_offset,
(unsigned long long) start << SECTOR_SHIFT); (unsigned long long) start << SECTOR_SHIFT);
limits->zoned = blk_queue_zoned_model(q);
return 0; return 0;
} }
...@@ -1528,22 +1526,6 @@ int dm_calculate_queue_limits(struct dm_table *table, ...@@ -1528,22 +1526,6 @@ int dm_calculate_queue_limits(struct dm_table *table,
dm_device_name(table->md), dm_device_name(table->md),
(unsigned long long) ti->begin, (unsigned long long) ti->begin,
(unsigned long long) ti->len); (unsigned long long) ti->len);
/*
* FIXME: this should likely be moved to blk_stack_limits(), would
* also eliminate limits->zoned stacking hack in dm_set_device_limits()
*/
if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
/*
* By default, the stacked limits zoned model is set to
* BLK_ZONED_NONE in blk_set_stacking_limits(). Update
* this model using the first target model reported
* that is not BLK_ZONED_NONE. This will be either the
* first target device zoned model or the model reported
* by the target .io_hints.
*/
limits->zoned = ti_limits.zoned;
}
} }
/* /*
......
...@@ -2081,7 +2081,8 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) ...@@ -2081,7 +2081,8 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
#ifdef CONFIG_NVME_MULTIPATH #ifdef CONFIG_NVME_MULTIPATH
if (ns->head->disk) { if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id); nvme_update_disk_info(ns->head->disk, ns, id);
blk_queue_stack_limits(ns->head->disk->queue, ns->queue); blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
nvme_mpath_update_disk_size(ns->head->disk); nvme_mpath_update_disk_size(ns->head->disk);
} }
#endif #endif
......
...@@ -306,9 +306,12 @@ enum blk_queue_state { ...@@ -306,9 +306,12 @@ enum blk_queue_state {
/* /*
* Zoned block device models (zoned limit). * Zoned block device models (zoned limit).
*
* Note: This needs to be ordered from the least to the most severe
* restrictions for the inheritance in blk_stack_limits() to work.
*/ */
enum blk_zoned_model { enum blk_zoned_model {
BLK_ZONED_NONE, /* Regular block device */ BLK_ZONED_NONE = 0, /* Regular block device */
BLK_ZONED_HA, /* Host-aware zoned block device */ BLK_ZONED_HA, /* Host-aware zoned block device */
BLK_ZONED_HM, /* Host-managed zoned block device */ BLK_ZONED_HM, /* Host-managed zoned block device */
}; };
...@@ -1136,11 +1139,8 @@ extern void blk_set_default_limits(struct queue_limits *lim); ...@@ -1136,11 +1139,8 @@ extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim); extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset); sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset); sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment