Commit e556f6ba authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe

block: remove the bd_queue field from struct block_device

Just use bd_disk->queue instead.
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6b7b181b
...@@ -59,7 +59,7 @@ EXPORT_SYMBOL(bdev_dax_pgoff); ...@@ -59,7 +59,7 @@ EXPORT_SYMBOL(bdev_dax_pgoff);
#if IS_ENABLED(CONFIG_FS_DAX) #if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev) struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{ {
if (!blk_queue_dax(bdev->bd_queue)) if (!blk_queue_dax(bdev->bd_disk->queue))
return NULL; return NULL;
return dax_get_by_host(bdev->bd_disk->disk_name); return dax_get_by_host(bdev->bd_disk->disk_name);
} }
......
...@@ -199,7 +199,7 @@ static int rdevs_init_serial(struct mddev *mddev) ...@@ -199,7 +199,7 @@ static int rdevs_init_serial(struct mddev *mddev)
static int rdev_need_serial(struct md_rdev *rdev) static int rdev_need_serial(struct md_rdev *rdev)
{ {
return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
rdev->bdev->bd_queue->nr_hw_queues != 1 && rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
test_bit(WriteMostly, &rdev->flags)); test_bit(WriteMostly, &rdev->flags));
} }
......
...@@ -467,7 +467,7 @@ static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns) ...@@ -467,7 +467,7 @@ static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
return -EINVAL; return -EINVAL;
} }
if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) { if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
pr_err("peer-to-peer DMA is not supported by the driver of %s\n", pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
ns->device_path); ns->device_path);
return -EINVAL; return -EINVAL;
......
...@@ -693,12 +693,12 @@ int bdev_read_page(struct block_device *bdev, sector_t sector, ...@@ -693,12 +693,12 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
if (!ops->rw_page || bdev_get_integrity(bdev)) if (!ops->rw_page || bdev_get_integrity(bdev))
return result; return result;
result = blk_queue_enter(bdev->bd_queue, 0); result = blk_queue_enter(bdev->bd_disk->queue, 0);
if (result) if (result)
return result; return result;
result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
REQ_OP_READ); REQ_OP_READ);
blk_queue_exit(bdev->bd_queue); blk_queue_exit(bdev->bd_disk->queue);
return result; return result;
} }
...@@ -729,7 +729,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, ...@@ -729,7 +729,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
if (!ops->rw_page || bdev_get_integrity(bdev)) if (!ops->rw_page || bdev_get_integrity(bdev))
return -EOPNOTSUPP; return -EOPNOTSUPP;
result = blk_queue_enter(bdev->bd_queue, 0); result = blk_queue_enter(bdev->bd_disk->queue, 0);
if (result) if (result)
return result; return result;
...@@ -742,7 +742,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, ...@@ -742,7 +742,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
clean_page_buffers(page); clean_page_buffers(page);
unlock_page(page); unlock_page(page);
} }
blk_queue_exit(bdev->bd_queue); blk_queue_exit(bdev->bd_disk->queue);
return result; return result;
} }
...@@ -1568,7 +1568,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) ...@@ -1568,7 +1568,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (!bdev->bd_openers) { if (!bdev->bd_openers) {
first_open = true; first_open = true;
bdev->bd_disk = disk; bdev->bd_disk = disk;
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev; bdev->bd_contains = bdev;
bdev->bd_partno = partno; bdev->bd_partno = partno;
...@@ -1589,7 +1588,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) ...@@ -1589,7 +1588,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
disk_put_part(bdev->bd_part); disk_put_part(bdev->bd_part);
bdev->bd_part = NULL; bdev->bd_part = NULL;
bdev->bd_disk = NULL; bdev->bd_disk = NULL;
bdev->bd_queue = NULL;
mutex_unlock(&bdev->bd_mutex); mutex_unlock(&bdev->bd_mutex);
disk_unblock_events(disk); disk_unblock_events(disk);
put_disk_and_module(disk); put_disk_and_module(disk);
...@@ -1666,7 +1664,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) ...@@ -1666,7 +1664,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
disk_put_part(bdev->bd_part); disk_put_part(bdev->bd_part);
bdev->bd_disk = NULL; bdev->bd_disk = NULL;
bdev->bd_part = NULL; bdev->bd_part = NULL;
bdev->bd_queue = NULL;
if (bdev != bdev->bd_contains) if (bdev != bdev->bd_contains)
__blkdev_put(bdev->bd_contains, mode, 1); __blkdev_put(bdev->bd_contains, mode, 1);
bdev->bd_contains = NULL; bdev->bd_contains = NULL;
......
...@@ -1387,8 +1387,8 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, ...@@ -1387,8 +1387,8 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
* Attempt to prefetch the pieces we likely need later. * Attempt to prefetch the pieces we likely need later.
*/ */
prefetch(&bdev->bd_disk->part_tbl); prefetch(&bdev->bd_disk->part_tbl);
prefetch(bdev->bd_queue); prefetch(bdev->bd_disk->queue);
prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES); prefetch((char *)bdev->bd_disk->queue + SMP_CACHE_BYTES);
return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block, return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block,
end_io, submit_io, flags); end_io, submit_io, flags);
......
...@@ -132,5 +132,5 @@ xfs_pwork_guess_datadev_parallelism( ...@@ -132,5 +132,5 @@ xfs_pwork_guess_datadev_parallelism(
* For now we'll go with the most conservative setting possible, * For now we'll go with the most conservative setting possible,
* which is two threads for an SSD and 1 thread everywhere else. * which is two threads for an SSD and 1 thread everywhere else.
*/ */
return blk_queue_nonrot(btp->bt_bdev->bd_queue) ? 2 : 1; return blk_queue_nonrot(btp->bt_bdev->bd_disk->queue) ? 2 : 1;
} }
...@@ -39,7 +39,6 @@ struct block_device { ...@@ -39,7 +39,6 @@ struct block_device {
unsigned bd_part_count; unsigned bd_part_count;
int bd_invalidated; int bd_invalidated;
struct gendisk * bd_disk; struct gendisk * bd_disk;
struct request_queue * bd_queue;
struct backing_dev_info *bd_bdi; struct backing_dev_info *bd_bdi;
struct list_head bd_list; struct list_head bd_list;
/* /*
......
...@@ -2929,7 +2929,7 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) ...@@ -2929,7 +2929,7 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
* write only restriction. Hence zoned block devices are not * write only restriction. Hence zoned block devices are not
* suitable for swapping. Disallow them here. * suitable for swapping. Disallow them here.
*/ */
if (blk_queue_is_zoned(p->bdev->bd_queue)) if (blk_queue_is_zoned(p->bdev->bd_disk->queue))
return -EINVAL; return -EINVAL;
p->flags |= SWP_BLKDEV; p->flags |= SWP_BLKDEV;
} else if (S_ISREG(inode->i_mode)) { } else if (S_ISREG(inode->i_mode)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.