Commit aadd5c59 authored by Christoph Hellwig, committed by Jens Axboe

block: move the synchronous flag to queue_limits

Move the synchronous flag into the queue_limits feature field so that it
can be set atomically with the queue frozen.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20240617060532.127975-19-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1a02f3a7
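With this change, a driver that always completes I/O in submit context declares the flag in the queue_limits it hands to disk allocation instead of setting a queue flag afterwards, as brd, zram, btt and pmem do in the diff below. A minimal sketch of the new pattern (the function name my_driver_alloc_disk and the NUMA node choice are illustrative, not part of this patch):

	#include <linux/blkdev.h>

	/* Hypothetical driver fragment: declare the feature in the limits
	 * passed to blk_alloc_disk(), so it is in place before the disk is
	 * exposed, rather than flipping a queue flag later. */
	static int my_driver_alloc_disk(void)
	{
		struct queue_limits lim = {
			.features = BLK_FEAT_SYNCHRONOUS,	/* completes in submit context */
		};
		struct gendisk *disk;

		disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
		if (IS_ERR(disk))
			return PTR_ERR(disk);
		/* ... set fops, name and capacity, then add_disk() ... */
		return 0;
	}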
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -85,7 +85,6 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(SAME_COMP),
 	QUEUE_FLAG_NAME(FAIL_IO),
 	QUEUE_FLAG_NAME(NOXMERGES),
-	QUEUE_FLAG_NAME(SYNCHRONOUS),
 	QUEUE_FLAG_NAME(SAME_FORCE),
 	QUEUE_FLAG_NAME(INIT_DONE),
 	QUEUE_FLAG_NAME(POLL),
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -335,6 +335,7 @@ static int brd_alloc(int i)
 		.max_hw_discard_sectors = UINT_MAX,
 		.max_discard_segments = 1,
 		.discard_granularity = PAGE_SIZE,
+		.features = BLK_FEAT_SYNCHRONOUS,
 	};
 	list_for_each_entry(brd, &brd_devices, brd_list)
@@ -366,7 +367,6 @@ static int brd_alloc(int i)
 	strscpy(disk->disk_name, buf, DISK_NAME_LEN);
 	set_capacity(disk, rd_size * 2);
-	blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
 	blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
 	err = add_disk(disk);
 	if (err)
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2208,7 +2208,8 @@ static int zram_add(void)
 #if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
 		.max_write_zeroes_sectors = UINT_MAX,
 #endif
-		.features = BLK_FEAT_STABLE_WRITES,
+		.features = BLK_FEAT_STABLE_WRITES |
+			    BLK_FEAT_SYNCHRONOUS,
 	};
 	struct zram *zram;
 	int ret, device_id;
@@ -2246,7 +2247,6 @@ static int zram_add(void)
 	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize */
 	set_capacity(zram->disk, 0);
-	blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
 	ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
 	if (ret)
 		goto out_cleanup_disk;
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1501,6 +1501,7 @@ static int btt_blk_init(struct btt *btt)
 		.logical_block_size = btt->sector_size,
 		.max_hw_sectors = UINT_MAX,
 		.max_integrity_segments = 1,
+		.features = BLK_FEAT_SYNCHRONOUS,
 	};
 	int rc;
@@ -1518,8 +1519,6 @@ static int btt_blk_init(struct btt *btt)
 	btt->btt_disk->fops = &btt_fops;
 	btt->btt_disk->private_data = btt;
-	blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);
 	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
 	rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
 	if (rc)
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -455,7 +455,8 @@ static int pmem_attach_disk(struct device *dev,
 		.logical_block_size = pmem_sector_size(ndns),
 		.physical_block_size = PAGE_SIZE,
 		.max_hw_sectors = UINT_MAX,
-		.features = BLK_FEAT_WRITE_CACHE,
+		.features = BLK_FEAT_WRITE_CACHE |
+			    BLK_FEAT_SYNCHRONOUS,
 	};
 	int nid = dev_to_node(dev), fua;
 	struct resource *res = &nsio->res;
@@ -546,7 +547,6 @@ static int pmem_attach_disk(struct device *dev,
 	}
 	pmem->virt_addr = addr;
-	blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q);
 	if (pmem->pfn_flags & PFN_MAP)
 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -301,6 +301,9 @@ enum {
 	/* don't modify data until writeback is done */
 	BLK_FEAT_STABLE_WRITES = (1u << 5),
+
+	/* always completes in submit context */
+	BLK_FEAT_SYNCHRONOUS = (1u << 6),
 };
 /*
@@ -566,7 +569,6 @@ struct request_queue {
 #define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
 #define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
 #define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
-#define QUEUE_FLAG_SYNCHRONOUS	11	/* always completes in submit context */
 #define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
 #define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
 #define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
@@ -1315,8 +1317,7 @@ static inline bool bdev_nonrot(struct block_device *bdev)
 static inline bool bdev_synchronous(struct block_device *bdev)
 {
-	return test_bit(QUEUE_FLAG_SYNCHRONOUS,
-			&bdev_get_queue(bdev)->queue_flags);
+	return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
 }
 static inline bool bdev_stable_writes(struct block_device *bdev)
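Callers are unaffected: bdev_synchronous() keeps its prototype and now simply reads the feature bit from the queue limits. For the "set atomically with the queue frozen" part of the rationale, a hedged sketch of how a driver might flip the feature on an already-registered queue using the atomic limits-update helpers; the explicit freeze and the function name are illustrative, not taken from this patch:

	/* Hypothetical helper: build the new limits under the limits lock
	 * and commit them with the queue frozen, so in-flight I/O never
	 * observes a half-applied features word. */
	static int my_enable_synchronous(struct request_queue *q)
	{
		struct queue_limits lim;
		int err;

		lim = queue_limits_start_update(q);
		lim.features |= BLK_FEAT_SYNCHRONOUS;

		blk_mq_freeze_queue(q);
		err = queue_limits_commit_update(q, &lim);
		blk_mq_unfreeze_queue(q);
		return err;
	}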