Commit fcf865e3 authored by Christoph Hellwig, committed by Jens Axboe

block: convert features and flags to __bitwise types

... and let sparse help us catch mismatches or abuses.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20240626142637.300624-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ec9b1cf0
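As background for the diff below, here is a minimal standalone sketch of the __bitwise pattern this commit applies. It is not kernel code; the example_* names are invented for illustration. Under sparse, __bitwise makes the typedef a distinct base type, so mixing it with plain integers (or with another __bitwise type) without an explicit __force cast draws a warning, while a regular compiler sees an ordinary unsigned int:

	/*
	 * Stand-ins for the kernel's definitions in compiler_types.h:
	 * sparse defines __CHECKER__ and understands the bitwise/force
	 * attributes; a regular compiler sees empty macros, so generated
	 * code is unchanged.
	 */
	#ifdef __CHECKER__
	#define __bitwise	__attribute__((bitwise))
	#define __force		__attribute__((force))
	#else
	#define __bitwise
	#define __force
	#endif

	typedef unsigned int __bitwise example_features_t;

	/* flag values must be cast into the bitwise type with __force */
	#define EXAMPLE_FEAT_WRITE_CACHE ((__force example_features_t)(1u << 0))

	static inline int example_has_write_cache(example_features_t features)
	{
		/*
		 * Fine: both operands are example_features_t, and !! turns
		 * the masked result into a plain 0/1 int for callers.
		 */
		return !!(features & EXAMPLE_FEAT_WRITE_CACHE);
	}

	static void example_misuse(void)
	{
		/* sparse: incorrect type in initializer (different base types) */
		example_features_t f = 1u;

		/* sparse also warns when the bitwise type leaks back out */
		unsigned int raw = EXAMPLE_FEAT_WRITE_CACHE;

		(void)f;
		(void)raw;
	}

Running sparse over such code (for the kernel tree, a build with make C=1) reports both lines in example_misuse(), which is the mismatch-catching the commit message refers to; a plain gcc build is unaffected.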
block/blk-sysfs.c
@@ -288,7 +288,7 @@ static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
 }
 
 static ssize_t queue_feature_store(struct request_queue *q, const char *page,
-		size_t count, unsigned int feature)
+		size_t count, blk_features_t feature)
 {
 	struct queue_limits lim;
 	unsigned long val;
@@ -418,7 +418,7 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
 
 static ssize_t queue_poll_show(struct request_queue *q, char *page)
 {
-	return queue_var_show(q->limits.features & BLK_FEAT_POLL, page);
+	return queue_var_show(!!(q->limits.features & BLK_FEAT_POLL), page);
 }
 
 static ssize_t queue_poll_store(struct request_queue *q, const char *page,
@@ -492,7 +492,7 @@ static ssize_t queue_fua_show(struct request_queue *q, char *page)
 
 static ssize_t queue_dax_show(struct request_queue *q, char *page)
 {
-	return queue_var_show(blk_queue_dax(q), page);
+	return queue_var_show(!!blk_queue_dax(q), page);
 }
 
 #define QUEUE_RO_ENTRY(_prefix, _name) \
include/linux/blkdev.h
@@ -283,55 +283,56 @@ static inline bool blk_op_is_passthrough(blk_opf_t op)
 }
 
 /* flags set by the driver in queue_limits.features */
-enum {
-	/* supports a volatile write cache */
-	BLK_FEAT_WRITE_CACHE = (1u << 0),
-
-	/* supports passing on the FUA bit */
-	BLK_FEAT_FUA = (1u << 1),
-
-	/* rotational device (hard drive or floppy) */
-	BLK_FEAT_ROTATIONAL = (1u << 2),
-
-	/* contributes to the random number pool */
-	BLK_FEAT_ADD_RANDOM = (1u << 3),
-
-	/* do disk/partitions IO accounting */
-	BLK_FEAT_IO_STAT = (1u << 4),
-
-	/* don't modify data until writeback is done */
-	BLK_FEAT_STABLE_WRITES = (1u << 5),
-
-	/* always completes in submit context */
-	BLK_FEAT_SYNCHRONOUS = (1u << 6),
-
-	/* supports REQ_NOWAIT */
-	BLK_FEAT_NOWAIT = (1u << 7),
-
-	/* supports DAX */
-	BLK_FEAT_DAX = (1u << 8),
-
-	/* supports I/O polling */
-	BLK_FEAT_POLL = (1u << 9),
-
-	/* is a zoned device */
-	BLK_FEAT_ZONED = (1u << 10),
-
-	/* supports Zone Reset All */
-	BLK_FEAT_ZONE_RESETALL = (1u << 11),
-
-	/* supports PCI(e) p2p requests */
-	BLK_FEAT_PCI_P2PDMA = (1u << 12),
-
-	/* skip this queue in blk_mq_(un)quiesce_tagset */
-	BLK_FEAT_SKIP_TAGSET_QUIESCE = (1u << 13),
-
-	/* bounce all highmem pages */
-	BLK_FEAT_BOUNCE_HIGH = (1u << 14),
-
-	/* undocumented magic for bcache */
-	BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE = (1u << 15),
-};
+typedef unsigned int __bitwise blk_features_t;
+
+/* supports a volatile write cache */
+#define BLK_FEAT_WRITE_CACHE		((__force blk_features_t)(1u << 0))
+
+/* supports passing on the FUA bit */
+#define BLK_FEAT_FUA			((__force blk_features_t)(1u << 1))
+
+/* rotational device (hard drive or floppy) */
+#define BLK_FEAT_ROTATIONAL		((__force blk_features_t)(1u << 2))
+
+/* contributes to the random number pool */
+#define BLK_FEAT_ADD_RANDOM		((__force blk_features_t)(1u << 3))
+
+/* do disk/partitions IO accounting */
+#define BLK_FEAT_IO_STAT		((__force blk_features_t)(1u << 4))
+
+/* don't modify data until writeback is done */
+#define BLK_FEAT_STABLE_WRITES		((__force blk_features_t)(1u << 5))
+
+/* always completes in submit context */
+#define BLK_FEAT_SYNCHRONOUS		((__force blk_features_t)(1u << 6))
+
+/* supports REQ_NOWAIT */
+#define BLK_FEAT_NOWAIT			((__force blk_features_t)(1u << 7))
+
+/* supports DAX */
+#define BLK_FEAT_DAX			((__force blk_features_t)(1u << 8))
+
+/* supports I/O polling */
+#define BLK_FEAT_POLL			((__force blk_features_t)(1u << 9))
+
+/* is a zoned device */
+#define BLK_FEAT_ZONED			((__force blk_features_t)(1u << 10))
+
+/* supports Zone Reset All */
+#define BLK_FEAT_ZONE_RESETALL		((__force blk_features_t)(1u << 11))
+
+/* supports PCI(e) p2p requests */
+#define BLK_FEAT_PCI_P2PDMA		((__force blk_features_t)(1u << 12))
+
+/* skip this queue in blk_mq_(un)quiesce_tagset */
+#define BLK_FEAT_SKIP_TAGSET_QUIESCE	((__force blk_features_t)(1u << 13))
+
+/* bounce all highmem pages */
+#define BLK_FEAT_BOUNCE_HIGH		((__force blk_features_t)(1u << 14))
+
+/* undocumented magic for bcache */
+#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
+	((__force blk_features_t)(1u << 15))
 
 /*
  * Flags automatically inherited when stacking limits.
@@ -342,17 +343,17 @@ enum {
 	 BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
 
 /* internal flags in queue_limits.flags */
-enum {
-	/* do not send FLUSH/FUA commands despite advertising a write cache */
-	BLK_FLAG_WRITE_CACHE_DISABLED = (1u << 0),
-
-	/* I/O topology is misaligned */
-	BLK_FLAG_MISALIGNED = (1u << 1),
-};
+typedef unsigned int __bitwise blk_flags_t;
+
+/* do not send FLUSH/FUA commands despite advertising a write cache */
+#define BLK_FLAG_WRITE_CACHE_DISABLED	((__force blk_flags_t)(1u << 0))
+
+/* I/O topology is misaligned */
+#define BLK_FLAG_MISALIGNED		((__force blk_flags_t)(1u << 1))
 
 struct queue_limits {
-	unsigned int		features;
-	unsigned int		flags;
+	blk_features_t		features;
+	blk_flags_t		flags;
 	unsigned long		seg_boundary_mask;
 	unsigned long		virt_boundary_mask;