Commit dc469ba2 authored by Bart Van Assche, committed by Jens Axboe

block/bfq: Use the new blk_opf_t type

Use the new blk_opf_t type for arguments and variables that represent
request flags or a bitwise combination of a request operation and
request flags. Rename those variables from 'op' to 'opf'.

This patch does not change any functionality.
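
For reference, blk_opf_t (introduced in include/linux/blk_types.h earlier
in this series) is a sparse '__bitwise' alias for __u32, so a combined
operation-and-flags value keeps its strong type through helpers such as
op_is_sync() and op_is_write(). A minimal sketch of the idiom; the local
variable below is illustrative, not taken from this patch:

	typedef __u32 __bitwise blk_opf_t;	/* include/linux/blk_types.h */

	/* A request operation combined with request flags: */
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC;

	/*
	 * The helpers now take blk_opf_t, so passing a plain unsigned int
	 * makes sparse (make C=2) report an incorrect-type warning.
	 */
	bool sync_read = op_is_sync(opf) && !op_is_write(opf); /* false here */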

Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-8-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 16458cf3
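
For context (not part of this patch): bfq_limit_depth() below reaches BFQ
through the elevator ops table, whose .limit_depth hook was switched to
blk_opf_t earlier in this series. A sketch of the call site in
block/blk-mq.c, quoted from memory and possibly differing in detail:

	/* block/blk-mq.c, __blk_mq_alloc_requests(): */
	if (!op_is_flush(data->cmd_flags) &&
	    !blk_op_is_passthrough(data->cmd_flags) &&
	    e->type->ops.limit_depth &&
	    !(data->flags & BLK_MQ_REQ_RESERVED))
		e->type->ops.limit_depth(data->cmd_flags, data);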
block/bfq-cgroup.c
@@ -220,46 +220,46 @@ void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
 }
 
 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
-			      unsigned int op)
+			      blk_opf_t opf)
 {
-	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
+	blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
 	bfqg_stats_end_empty_time(&bfqg->stats);
 	if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
 		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
 }
 
-void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
+void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
 {
-	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
+	blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
 }
 
-void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
+void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
 {
-	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
+	blkg_rwstat_add(&bfqg->stats.merged, opf, 1);
 }
 
 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
-				  u64 io_start_time_ns, unsigned int op)
+				  u64 io_start_time_ns, blk_opf_t opf)
 {
 	struct bfqg_stats *stats = &bfqg->stats;
 	u64 now = ktime_get_ns();
 
 	if (now > io_start_time_ns)
-		blkg_rwstat_add(&stats->service_time, op,
+		blkg_rwstat_add(&stats->service_time, opf,
 				now - io_start_time_ns);
 	if (io_start_time_ns > start_time_ns)
-		blkg_rwstat_add(&stats->wait_time, op,
+		blkg_rwstat_add(&stats->wait_time, opf,
 				io_start_time_ns - start_time_ns);
 }
 
 #else /* CONFIG_BFQ_CGROUP_DEBUG */
 
 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
-			      unsigned int op) { }
-void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
-void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
+			      blk_opf_t opf) { }
+void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
+void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
-				  u64 io_start_time_ns, unsigned int op) { }
+				  u64 io_start_time_ns, blk_opf_t opf) { }
 void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
 void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
block/bfq-iosched.c
@@ -668,19 +668,19 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
  * significantly affect service guarantees coming from the BFQ scheduling
  * algorithm.
  */
-static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 {
 	struct bfq_data *bfqd = data->q->elevator->elevator_data;
 	struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
-	struct bfq_queue *bfqq = bic ? bic_to_bfqq(bic, op_is_sync(op)) : NULL;
+	struct bfq_queue *bfqq = bic ? bic_to_bfqq(bic, op_is_sync(opf)) : NULL;
 	int depth;
 	unsigned limit = data->q->nr_requests;
 
 	/* Sync reads have full depth available */
-	if (op_is_sync(op) && !op_is_write(op)) {
+	if (op_is_sync(opf) && !op_is_write(opf)) {
 		depth = 0;
 	} else {
-		depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
+		depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
 		limit = (limit * depth) >> bfqd->full_depth_shift;
 	}
@@ -693,7 +693,7 @@ static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 		depth = 1;
 
 	bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
-			__func__, bfqd->wr_busy_queues, op_is_sync(op), depth);
+			__func__, bfqd->wr_busy_queues, op_is_sync(opf), depth);
 	if (depth)
 		data->shallow_depth = depth;
 }
@@ -6104,7 +6104,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 static void bfq_update_insert_stats(struct request_queue *q,
 				    struct bfq_queue *bfqq,
 				    bool idle_timer_disabled,
-				    unsigned int cmd_flags)
+				    blk_opf_t cmd_flags)
 {
 	if (!bfqq)
 		return;
@@ -6129,7 +6129,7 @@ static void bfq_update_insert_stats(struct request_queue *q,
 static inline void bfq_update_insert_stats(struct request_queue *q,
 					   struct bfq_queue *bfqq,
 					   bool idle_timer_disabled,
-					   unsigned int cmd_flags) {}
+					   blk_opf_t cmd_flags) {}
 #endif /* CONFIG_BFQ_CGROUP_DEBUG */
 
 static struct bfq_queue *bfq_init_rq(struct request *rq);
@@ -6141,7 +6141,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	struct bfq_data *bfqd = q->elevator->elevator_data;
 	struct bfq_queue *bfqq;
 	bool idle_timer_disabled = false;
-	unsigned int cmd_flags;
+	blk_opf_t cmd_flags;
 	LIST_HEAD(free);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
block/bfq-iosched.h
@@ -994,11 +994,11 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
 void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq);
 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
-			      unsigned int op);
-void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
-void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
+			      blk_opf_t opf);
+void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf);
+void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf);
 void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
-				  u64 io_start_time_ns, unsigned int op);
+				  u64 io_start_time_ns, blk_opf_t opf);
 void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
 void bfqg_stats_update_idle_time(struct bfq_group *bfqg);
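
A usage note, not part of the commit: because blk_opf_t is __bitwise, the
renamed parameters are now type-checked by sparse. A hypothetical caller
illustrates what the new signatures catch:

	bfqg_stats_update_io_remove(bfqg, 0x1);           /* sparse: incorrect type in argument 2 */
	bfqg_stats_update_io_remove(bfqg, rq->cmd_flags); /* fine once rq->cmd_flags is itself
							     blk_opf_t, as done elsewhere in
							     this series */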