Commit 7e0d574f authored by Bart Van Assche, committed by Mike Snitzer

dm: introduce enum dm_queue_mode to cleanup related code

Introduce an enumeration type for the queue mode.  This patch does
not change any functionality but makes the DM code easier to read.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent b194679f
...@@ -47,7 +47,7 @@ struct mapped_device { ...@@ -47,7 +47,7 @@ struct mapped_device {
struct request_queue *queue; struct request_queue *queue;
int numa_node_id; int numa_node_id;
unsigned type; enum dm_queue_mode type;
/* Protect queue and type against concurrent access. */ /* Protect queue and type against concurrent access. */
struct mutex type_lock; struct mutex type_lock;
......
...@@ -1260,7 +1260,7 @@ static int populate_table(struct dm_table *table, ...@@ -1260,7 +1260,7 @@ static int populate_table(struct dm_table *table,
return dm_table_complete(table); return dm_table_complete(table);
} }
static bool is_valid_type(unsigned cur, unsigned new) static bool is_valid_type(enum dm_queue_mode cur, enum dm_queue_mode new)
{ {
if (cur == new || if (cur == new ||
(cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED)) (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED))
......
...@@ -90,7 +90,7 @@ struct multipath { ...@@ -90,7 +90,7 @@ struct multipath {
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */ atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
atomic_t pg_init_count; /* Number of times pg_init called */ atomic_t pg_init_count; /* Number of times pg_init called */
unsigned queue_mode; enum dm_queue_mode queue_mode;
struct mutex work_mutex; struct mutex work_mutex;
struct work_struct trigger_event; struct work_struct trigger_event;
...@@ -1700,6 +1700,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type, ...@@ -1700,6 +1700,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
case DM_TYPE_MQ_REQUEST_BASED: case DM_TYPE_MQ_REQUEST_BASED:
DMEMIT("queue_mode mq "); DMEMIT("queue_mode mq ");
break; break;
default:
WARN_ON_ONCE(true);
break;
} }
} }
} }
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
struct dm_table { struct dm_table {
struct mapped_device *md; struct mapped_device *md;
unsigned type; enum dm_queue_mode type;
/* btree table */ /* btree table */
unsigned int depth; unsigned int depth;
...@@ -825,19 +825,19 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args) ...@@ -825,19 +825,19 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
} }
EXPORT_SYMBOL(dm_consume_args); EXPORT_SYMBOL(dm_consume_args);
static bool __table_type_bio_based(unsigned table_type) static bool __table_type_bio_based(enum dm_queue_mode table_type)
{ {
return (table_type == DM_TYPE_BIO_BASED || return (table_type == DM_TYPE_BIO_BASED ||
table_type == DM_TYPE_DAX_BIO_BASED); table_type == DM_TYPE_DAX_BIO_BASED);
} }
static bool __table_type_request_based(unsigned table_type) static bool __table_type_request_based(enum dm_queue_mode table_type)
{ {
return (table_type == DM_TYPE_REQUEST_BASED || return (table_type == DM_TYPE_REQUEST_BASED ||
table_type == DM_TYPE_MQ_REQUEST_BASED); table_type == DM_TYPE_MQ_REQUEST_BASED);
} }
void dm_table_set_type(struct dm_table *t, unsigned type) void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{ {
t->type = type; t->type = type;
} }
...@@ -879,7 +879,7 @@ static int dm_table_determine_type(struct dm_table *t) ...@@ -879,7 +879,7 @@ static int dm_table_determine_type(struct dm_table *t)
struct dm_target *tgt; struct dm_target *tgt;
struct dm_dev_internal *dd; struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t); struct list_head *devices = dm_table_get_devices(t);
unsigned live_md_type = dm_get_md_type(t->md); enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
if (t->type != DM_TYPE_NONE) { if (t->type != DM_TYPE_NONE) {
/* target already set the table's type */ /* target already set the table's type */
...@@ -988,7 +988,7 @@ static int dm_table_determine_type(struct dm_table *t) ...@@ -988,7 +988,7 @@ static int dm_table_determine_type(struct dm_table *t)
return 0; return 0;
} }
unsigned dm_table_get_type(struct dm_table *t) enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{ {
return t->type; return t->type;
} }
...@@ -1039,7 +1039,7 @@ bool dm_table_all_blk_mq_devices(struct dm_table *t) ...@@ -1039,7 +1039,7 @@ bool dm_table_all_blk_mq_devices(struct dm_table *t)
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{ {
unsigned type = dm_table_get_type(t); enum dm_queue_mode type = dm_table_get_type(t);
unsigned per_io_data_size = 0; unsigned per_io_data_size = 0;
struct dm_target *tgt; struct dm_target *tgt;
unsigned i; unsigned i;
......
...@@ -1807,13 +1807,13 @@ void dm_unlock_md_type(struct mapped_device *md) ...@@ -1807,13 +1807,13 @@ void dm_unlock_md_type(struct mapped_device *md)
mutex_unlock(&md->type_lock); mutex_unlock(&md->type_lock);
} }
void dm_set_md_type(struct mapped_device *md, unsigned type) void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{ {
BUG_ON(!mutex_is_locked(&md->type_lock)); BUG_ON(!mutex_is_locked(&md->type_lock));
md->type = type; md->type = type;
} }
unsigned dm_get_md_type(struct mapped_device *md) enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{ {
return md->type; return md->type;
} }
...@@ -1840,7 +1840,7 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits); ...@@ -1840,7 +1840,7 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{ {
int r; int r;
unsigned type = dm_get_md_type(md); enum dm_queue_mode type = dm_get_md_type(md);
switch (type) { switch (type) {
case DM_TYPE_REQUEST_BASED: case DM_TYPE_REQUEST_BASED:
...@@ -1871,6 +1871,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) ...@@ -1871,6 +1871,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
if (type == DM_TYPE_DAX_BIO_BASED) if (type == DM_TYPE_DAX_BIO_BASED)
queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue); queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
break; break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
break;
} }
return 0; return 0;
...@@ -2556,7 +2559,7 @@ int dm_noflush_suspending(struct dm_target *ti) ...@@ -2556,7 +2559,7 @@ int dm_noflush_suspending(struct dm_target *ti)
} }
EXPORT_SYMBOL_GPL(dm_noflush_suspending); EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
unsigned integrity, unsigned per_io_data_size) unsigned integrity, unsigned per_io_data_size)
{ {
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
......
...@@ -64,7 +64,7 @@ void dm_table_presuspend_undo_targets(struct dm_table *t); ...@@ -64,7 +64,7 @@ void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t); void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t); int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits); int dm_table_any_congested(struct dm_table *t, int bdi_bits);
unsigned dm_table_get_type(struct dm_table *t); enum dm_queue_mode dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t); struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t); struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t); struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
...@@ -76,8 +76,8 @@ struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); ...@@ -76,8 +76,8 @@ struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
void dm_lock_md_type(struct mapped_device *md); void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md); void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, unsigned type); void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
unsigned dm_get_md_type(struct mapped_device *md); enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md); struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t); int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
...@@ -204,7 +204,7 @@ void dm_kcopyd_exit(void); ...@@ -204,7 +204,7 @@ void dm_kcopyd_exit(void);
/* /*
* Mempool operations * Mempool operations
*/ */
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
unsigned integrity, unsigned per_bio_data_size); unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools); void dm_free_md_mempools(struct dm_md_mempools *pools);
......
...@@ -22,11 +22,13 @@ struct bio_vec; ...@@ -22,11 +22,13 @@ struct bio_vec;
/* /*
* Type of table, mapped_device's mempool and request_queue * Type of table, mapped_device's mempool and request_queue
*/ */
#define DM_TYPE_NONE 0 enum dm_queue_mode {
#define DM_TYPE_BIO_BASED 1 DM_TYPE_NONE = 0,
#define DM_TYPE_REQUEST_BASED 2 DM_TYPE_BIO_BASED = 1,
#define DM_TYPE_MQ_REQUEST_BASED 3 DM_TYPE_REQUEST_BASED = 2,
#define DM_TYPE_DAX_BIO_BASED 4 DM_TYPE_MQ_REQUEST_BASED = 3,
DM_TYPE_DAX_BIO_BASED = 4,
};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
...@@ -476,7 +478,7 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback ...@@ -476,7 +478,7 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback
* Useful for "hybrid" target (supports both bio-based * Useful for "hybrid" target (supports both bio-based
* and request-based). * and request-based).
*/ */
void dm_table_set_type(struct dm_table *t, unsigned type); void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
/* /*
* Finally call this to make the table ready for use. * Finally call this to make the table ready for use.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment