Commit 61789765 authored by Jens Axboe

Merge branch 'for-3.16/core' into for-3.16/drivers

mtip32xx uses blk_mq_alloc_reserved_request(), so pull in the
core changes so we have a properly merged end result.
Signed-off-by: Jens Axboe <axboe@fb.com>
parents 6314a108 d852564f
...@@ -62,7 +62,7 @@ ...@@ -62,7 +62,7 @@
!Efs/mpage.c !Efs/mpage.c
!Efs/namei.c !Efs/namei.c
!Efs/buffer.c !Efs/buffer.c
!Efs/bio.c !Eblock/bio.c
!Efs/seq_file.c !Efs/seq_file.c
!Efs/filesystems.c !Efs/filesystems.c
!Efs/fs-writeback.c !Efs/fs-writeback.c
......
...@@ -2,13 +2,15 @@ ...@@ -2,13 +2,15 @@
# Makefile for the kernel block layer # Makefile for the kernel block layer
# #
obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \ blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \
blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \ blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
genhd.o scsi_ioctl.o partition-generic.o partitions/ genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
partitions/
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
...@@ -20,3 +22,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o ...@@ -20,3 +22,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o
...@@ -617,7 +617,7 @@ int bioset_integrity_create(struct bio_set *bs, int pool_size) ...@@ -617,7 +617,7 @@ int bioset_integrity_create(struct bio_set *bs, int pool_size)
if (!bs->bio_integrity_pool) if (!bs->bio_integrity_pool)
return -1; return -1;
bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size); bs->bvec_integrity_pool = biovec_create_pool(pool_size);
if (!bs->bvec_integrity_pool) { if (!bs->bvec_integrity_pool) {
mempool_destroy(bs->bio_integrity_pool); mempool_destroy(bs->bio_integrity_pool);
return -1; return -1;
......
...@@ -305,6 +305,8 @@ static void bio_chain_endio(struct bio *bio, int error) ...@@ -305,6 +305,8 @@ static void bio_chain_endio(struct bio *bio, int error)
/** /**
* bio_chain - chain bio completions * bio_chain - chain bio completions
* @bio: the target bio
* @parent: the @bio's parent bio
* *
* The caller won't have a bi_end_io called when @bio completes - instead, * The caller won't have a bi_end_io called when @bio completes - instead,
* @parent's bi_end_io won't be called until both @parent and @bio have * @parent's bi_end_io won't be called until both @parent and @bio have
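
The two kernel-doc lines added above document bio_chain()'s arguments. For readers unfamiliar with the pattern, here is a hedged kernel-context sketch of a typical caller (not standalone-buildable; the 3.16-era bio_split() signature is assumed, and `sectors`/`bs` are placeholders supplied by the caller):

```c
/* Sketch of the common bio_chain() pattern (kernel context, illustrative). */
static void submit_front_half(struct bio *bio, int sectors, struct bio_set *bs)
{
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

	bio_chain(split, bio);		/* @bio = split, @parent = bio */
	generic_make_request(split);
	/* keep working on 'bio'; its bi_end_io runs after both have completed */
}
```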
...@@ -1011,8 +1013,7 @@ static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio, ...@@ -1011,8 +1013,7 @@ static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
bio->bi_private = bmd; bio->bi_private = bmd;
} }
static struct bio_map_data *bio_alloc_map_data(int nr_segs, static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
unsigned int iov_count,
gfp_t gfp_mask) gfp_t gfp_mask)
{ {
if (iov_count > UIO_MAXIOV) if (iov_count > UIO_MAXIOV)
...@@ -1154,7 +1155,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, ...@@ -1154,7 +1155,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
if (offset) if (offset)
nr_pages++; nr_pages++;
bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask); bmd = bio_alloc_map_data(iov_count, gfp_mask);
if (!bmd) if (!bmd)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -1859,7 +1860,7 @@ EXPORT_SYMBOL_GPL(bio_trim); ...@@ -1859,7 +1860,7 @@ EXPORT_SYMBOL_GPL(bio_trim);
* create memory pools for biovec's in a bio_set. * create memory pools for biovec's in a bio_set.
* use the global biovec slabs created for general use. * use the global biovec slabs created for general use.
*/ */
mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries) mempool_t *biovec_create_pool(int pool_entries)
{ {
struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX; struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
...@@ -1922,7 +1923,7 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) ...@@ -1922,7 +1923,7 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
if (!bs->bio_pool) if (!bs->bio_pool)
goto bad; goto bad;
bs->bvec_pool = biovec_create_pool(bs, pool_size); bs->bvec_pool = biovec_create_pool(pool_size);
if (!bs->bvec_pool) if (!bs->bvec_pool)
goto bad; goto bad;
......
...@@ -576,12 +576,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) ...@@ -576,12 +576,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q) if (!q)
return NULL; return NULL;
if (percpu_counter_init(&q->mq_usage_counter, 0))
goto fail_q;
q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0) if (q->id < 0)
goto fail_c; goto fail_q;
q->backing_dev_info.ra_pages = q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
...@@ -639,8 +636,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) ...@@ -639,8 +636,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
bdi_destroy(&q->backing_dev_info); bdi_destroy(&q->backing_dev_info);
fail_id: fail_id:
ida_simple_remove(&blk_queue_ida, q->id); ida_simple_remove(&blk_queue_ida, q->id);
fail_c:
percpu_counter_destroy(&q->mq_usage_counter);
fail_q: fail_q:
kmem_cache_free(blk_requestq_cachep, q); kmem_cache_free(blk_requestq_cachep, q);
return NULL; return NULL;
...@@ -848,6 +843,47 @@ static void freed_request(struct request_list *rl, unsigned int flags) ...@@ -848,6 +843,47 @@ static void freed_request(struct request_list *rl, unsigned int flags)
__freed_request(rl, sync ^ 1); __freed_request(rl, sync ^ 1);
} }
int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
struct request_list *rl;
spin_lock_irq(q->queue_lock);
q->nr_requests = nr;
blk_queue_congestion_threshold(q);
/* congestion isn't cgroup aware and follows root blkcg for now */
rl = &q->root_rl;
if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
blk_set_queue_congested(q, BLK_RW_SYNC);
else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
blk_clear_queue_congested(q, BLK_RW_SYNC);
if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
blk_set_queue_congested(q, BLK_RW_ASYNC);
else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
blk_clear_queue_congested(q, BLK_RW_ASYNC);
blk_queue_for_each_rl(rl, q) {
if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_SYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_SYNC);
wake_up(&rl->wait[BLK_RW_SYNC]);
}
if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_ASYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_ASYNC);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
}
spin_unlock_irq(q->queue_lock);
return 0;
}
/* /*
* Determine if elevator data should be initialized when allocating the * Determine if elevator data should be initialized when allocating the
* request associated with @bio. * request associated with @bio.
...@@ -1137,7 +1173,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw, ...@@ -1137,7 +1173,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{ {
if (q->mq_ops) if (q->mq_ops)
return blk_mq_alloc_request(q, rw, gfp_mask); return blk_mq_alloc_request(q, rw, gfp_mask, false);
else else
return blk_old_get_request(q, rw, gfp_mask); return blk_old_get_request(q, rw, gfp_mask);
} }
...@@ -1233,12 +1269,15 @@ static void add_acct_request(struct request_queue *q, struct request *rq, ...@@ -1233,12 +1269,15 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
static void part_round_stats_single(int cpu, struct hd_struct *part, static void part_round_stats_single(int cpu, struct hd_struct *part,
unsigned long now) unsigned long now)
{ {
int inflight;
if (now == part->stamp) if (now == part->stamp)
return; return;
if (part_in_flight(part)) { inflight = part_in_flight(part);
if (inflight) {
__part_stat_add(cpu, part, time_in_queue, __part_stat_add(cpu, part, time_in_queue,
part_in_flight(part) * (now - part->stamp)); inflight * (now - part->stamp));
__part_stat_add(cpu, part, io_ticks, (now - part->stamp)); __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
} }
part->stamp = now; part->stamp = now;
...@@ -1427,6 +1466,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req, ...@@ -1427,6 +1466,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
* added on the elevator at this point. In addition, we don't have * added on the elevator at this point. In addition, we don't have
* reliable access to the elevator outside queue lock. Only check basic * reliable access to the elevator outside queue lock. Only check basic
* merging parameters without querying the elevator. * merging parameters without querying the elevator.
*
* Caller must ensure !blk_queue_nomerges(q) beforehand.
*/ */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int *request_count) unsigned int *request_count)
...@@ -1436,9 +1477,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, ...@@ -1436,9 +1477,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
bool ret = false; bool ret = false;
struct list_head *plug_list; struct list_head *plug_list;
if (blk_queue_nomerges(q))
goto out;
plug = current->plug; plug = current->plug;
if (!plug) if (!plug)
goto out; goto out;
...@@ -1517,7 +1555,8 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio) ...@@ -1517,7 +1555,8 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
* Check if we can merge with the plugged list before grabbing * Check if we can merge with the plugged list before grabbing
* any locks. * any locks.
*/ */
if (blk_attempt_plug_merge(q, bio, &request_count)) if (!blk_queue_nomerges(q) &&
blk_attempt_plug_merge(q, bio, &request_count))
return; return;
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
......
...@@ -130,21 +130,13 @@ static void blk_flush_restore_request(struct request *rq) ...@@ -130,21 +130,13 @@ static void blk_flush_restore_request(struct request *rq)
blk_clear_rq_complete(rq); blk_clear_rq_complete(rq);
} }
static void mq_flush_run(struct work_struct *work)
{
struct request *rq;
rq = container_of(work, struct request, requeue_work);
memset(&rq->csd, 0, sizeof(rq->csd));
blk_mq_insert_request(rq, false, true, false);
}
static bool blk_flush_queue_rq(struct request *rq, bool add_front) static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{ {
if (rq->q->mq_ops) { if (rq->q->mq_ops) {
INIT_WORK(&rq->requeue_work, mq_flush_run); struct request_queue *q = rq->q;
kblockd_schedule_work(&rq->requeue_work);
blk_mq_add_to_requeue_list(rq, add_front);
blk_mq_kick_requeue_list(q);
return false; return false;
} else { } else {
if (add_front) if (add_front)
......
...@@ -64,12 +64,12 @@ EXPORT_SYMBOL(__blk_iopoll_complete); ...@@ -64,12 +64,12 @@ EXPORT_SYMBOL(__blk_iopoll_complete);
* iopoll handler will not be invoked again before blk_iopoll_sched_prep() * iopoll handler will not be invoked again before blk_iopoll_sched_prep()
* is called. * is called.
**/ **/
void blk_iopoll_complete(struct blk_iopoll *iopoll) void blk_iopoll_complete(struct blk_iopoll *iop)
{ {
unsigned long flags; unsigned long flags;
local_irq_save(flags); local_irq_save(flags);
__blk_iopoll_complete(iopoll); __blk_iopoll_complete(iop);
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL(blk_iopoll_complete); EXPORT_SYMBOL(blk_iopoll_complete);
......
...@@ -226,7 +226,7 @@ EXPORT_SYMBOL(blkdev_issue_write_same); ...@@ -226,7 +226,7 @@ EXPORT_SYMBOL(blkdev_issue_write_same);
* Generate and issue number of bios with zerofiled pages. * Generate and issue number of bios with zerofiled pages.
*/ */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask) sector_t nr_sects, gfp_t gfp_mask)
{ {
int ret; int ret;
......
...@@ -18,14 +18,18 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self, ...@@ -18,14 +18,18 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
{ {
unsigned int cpu = (unsigned long) hcpu; unsigned int cpu = (unsigned long) hcpu;
struct blk_mq_cpu_notifier *notify; struct blk_mq_cpu_notifier *notify;
int ret = NOTIFY_OK;
raw_spin_lock(&blk_mq_cpu_notify_lock); raw_spin_lock(&blk_mq_cpu_notify_lock);
list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
notify->notify(notify->data, action, cpu); ret = notify->notify(notify->data, action, cpu);
if (ret != NOTIFY_OK)
break;
}
raw_spin_unlock(&blk_mq_cpu_notify_lock); raw_spin_unlock(&blk_mq_cpu_notify_lock);
return NOTIFY_OK; return ret;
} }
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier) void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
...@@ -45,7 +49,7 @@ void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) ...@@ -45,7 +49,7 @@ void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
} }
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
void (*fn)(void *, unsigned long, unsigned int), int (*fn)(void *, unsigned long, unsigned int),
void *data) void *data)
{ {
notifier->notify = fn; notifier->notify = fn;
......
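
The hunks above change the per-CPU notifier callback from returning void to returning int, and make the dispatch loop stop at the first callback that does not return NOTIFY_OK. A self-contained userspace model of that early-exit behaviour (names and status values here are illustrative, not the kernel's):

```c
/* Userspace model of a notifier chain that stops on the first failure. */
#include <stdio.h>

#define DEMO_NOTIFY_OK  1
#define DEMO_NOTIFY_BAD 0

typedef int (*demo_notify_fn)(void *data, unsigned long action, unsigned int cpu);

static int always_ok(void *data, unsigned long action, unsigned int cpu)
{
	printf("always_ok: action=%lu cpu=%u\n", action, cpu);
	return DEMO_NOTIFY_OK;
}

static int refuses(void *data, unsigned long action, unsigned int cpu)
{
	printf("refuses: action=%lu cpu=%u\n", action, cpu);
	return DEMO_NOTIFY_BAD;		/* the chain stops here */
}

static int never_runs(void *data, unsigned long action, unsigned int cpu)
{
	printf("never_runs\n");
	return DEMO_NOTIFY_OK;
}

int main(void)
{
	demo_notify_fn chain[] = { always_ok, refuses, never_runs };
	int ret = DEMO_NOTIFY_OK;

	for (unsigned int i = 0; i < sizeof(chain) / sizeof(chain[0]); i++) {
		ret = chain[i](NULL, 42, 3);
		if (ret != DEMO_NOTIFY_OK)
			break;		/* same early exit as the hunk above */
	}
	printf("final status: %d\n", ret);
	return 0;
}
```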
...@@ -96,3 +96,19 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) ...@@ -96,3 +96,19 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
kfree(map); kfree(map);
return NULL; return NULL;
} }
/*
* We have no quick way of doing reverse lookups. This is only used at
* queue init time, so runtime isn't important.
*/
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
int i;
for_each_possible_cpu(i) {
if (index == mq_map[i])
return cpu_to_node(i);
}
return NUMA_NO_NODE;
}
...@@ -203,45 +203,14 @@ static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx, ...@@ -203,45 +203,14 @@ static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
return ret; return ret;
} }
static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page) static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
ssize_t ret;
spin_lock(&hctx->lock);
ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
spin_unlock(&hctx->lock);
return ret;
}
static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
const char *page, size_t len)
{ {
struct blk_mq_ctx *ctx; return blk_mq_tag_sysfs_show(hctx->tags, page);
unsigned long ret;
unsigned int i;
if (kstrtoul(page, 10, &ret)) {
pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
return -EINVAL;
}
spin_lock(&hctx->lock);
if (ret)
hctx->flags |= BLK_MQ_F_SHOULD_IPI;
else
hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
spin_unlock(&hctx->lock);
hctx_for_each_ctx(hctx, ctx, i)
ctx->ipi_redirect = !!ret;
return len;
} }
static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{ {
return blk_mq_tag_sysfs_show(hctx->tags, page); return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
} }
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
...@@ -303,15 +272,14 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = { ...@@ -303,15 +272,14 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO }, .attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_dispatched_show, .show = blk_mq_hw_sysfs_dispatched_show,
}; };
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
.attr = {.name = "active", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = { static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
.attr = {.name = "pending", .mode = S_IRUGO }, .attr = {.name = "pending", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_rq_list_show, .show = blk_mq_hw_sysfs_rq_list_show,
}; };
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
.attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
.show = blk_mq_hw_sysfs_ipi_show,
.store = blk_mq_hw_sysfs_ipi_store,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = { static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
.attr = {.name = "tags", .mode = S_IRUGO }, .attr = {.name = "tags", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_tags_show, .show = blk_mq_hw_sysfs_tags_show,
...@@ -326,9 +294,9 @@ static struct attribute *default_hw_ctx_attrs[] = { ...@@ -326,9 +294,9 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_run.attr, &blk_mq_hw_sysfs_run.attr,
&blk_mq_hw_sysfs_dispatched.attr, &blk_mq_hw_sysfs_dispatched.attr,
&blk_mq_hw_sysfs_pending.attr, &blk_mq_hw_sysfs_pending.attr,
&blk_mq_hw_sysfs_ipi.attr,
&blk_mq_hw_sysfs_tags.attr, &blk_mq_hw_sysfs_tags.attr,
&blk_mq_hw_sysfs_cpus.attr, &blk_mq_hw_sysfs_cpus.attr,
&blk_mq_hw_sysfs_active.attr,
NULL, NULL,
}; };
......
This diff is collapsed.
#ifndef INT_BLK_MQ_TAG_H #ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H #define INT_BLK_MQ_TAG_H
#include <linux/percpu_ida.h> #include "blk-mq.h"
enum {
BT_WAIT_QUEUES = 8,
BT_WAIT_BATCH = 8,
};
struct bt_wait_state {
atomic_t wait_cnt;
wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;
#define TAG_TO_INDEX(bt, tag) ((tag) >> (bt)->bits_per_word)
#define TAG_TO_BIT(bt, tag) ((tag) & ((1 << (bt)->bits_per_word) - 1))
struct blk_mq_bitmap_tags {
unsigned int depth;
unsigned int wake_cnt;
unsigned int bits_per_word;
unsigned int map_nr;
struct blk_align_bitmap *map;
unsigned int wake_index;
struct bt_wait_state *bs;
};
/* /*
* Tag address space map. * Tag address space map.
...@@ -9,11 +34,11 @@ ...@@ -9,11 +34,11 @@
struct blk_mq_tags { struct blk_mq_tags {
unsigned int nr_tags; unsigned int nr_tags;
unsigned int nr_reserved_tags; unsigned int nr_reserved_tags;
unsigned int nr_batch_move;
unsigned int nr_max_cache;
struct percpu_ida free_tags; atomic_t active_queues;
struct percpu_ida reserved_tags;
struct blk_mq_bitmap_tags bitmap_tags;
struct blk_mq_bitmap_tags breserved_tags;
struct request **rqs; struct request **rqs;
struct list_head page_list; struct list_head page_list;
...@@ -23,12 +48,12 @@ struct blk_mq_tags { ...@@ -23,12 +48,12 @@ struct blk_mq_tags {
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
extern void blk_mq_free_tags(struct blk_mq_tags *tags); extern void blk_mq_free_tags(struct blk_mq_tags *tags);
extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved); extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved);
extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags); extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag);
extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
enum { enum {
BLK_MQ_TAG_CACHE_MIN = 1, BLK_MQ_TAG_CACHE_MIN = 1,
...@@ -41,4 +66,23 @@ enum { ...@@ -41,4 +66,23 @@ enum {
BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1, BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1,
}; };
extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
return false;
return __blk_mq_tag_busy(hctx);
}
static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
return;
__blk_mq_tag_idle(hctx);
}
#endif #endif
This diff is collapsed.
...@@ -11,7 +11,8 @@ struct blk_mq_ctx { ...@@ -11,7 +11,8 @@ struct blk_mq_ctx {
unsigned int cpu; unsigned int cpu;
unsigned int index_hw; unsigned int index_hw;
unsigned int ipi_redirect;
unsigned int last_tag ____cacheline_aligned_in_smp;
/* incremented at dispatch time */ /* incremented at dispatch time */
unsigned long rq_dispatched[2]; unsigned long rq_dispatched[2];
...@@ -22,7 +23,7 @@ struct blk_mq_ctx { ...@@ -22,7 +23,7 @@ struct blk_mq_ctx {
struct request_queue *queue; struct request_queue *queue;
struct kobject kobj; struct kobject kobj;
}; } ____cacheline_aligned_in_smp;
void __blk_mq_complete_request(struct request *rq); void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
...@@ -31,13 +32,14 @@ void blk_mq_drain_queue(struct request_queue *q); ...@@ -31,13 +32,14 @@ void blk_mq_drain_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q); void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq, void blk_mq_clone_flush_request(struct request *flush_rq,
struct request *orig_rq); struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
/* /*
* CPU hotplug helpers * CPU hotplug helpers
*/ */
struct blk_mq_cpu_notifier; struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
void (*fn)(void *, unsigned long, unsigned int), int (*fn)(void *, unsigned long, unsigned int),
void *data); void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier); void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier); void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
...@@ -50,7 +52,15 @@ void blk_mq_disable_hotplug(void); ...@@ -50,7 +52,15 @@ void blk_mq_disable_hotplug(void);
*/ */
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
void blk_mq_add_timer(struct request *rq); /*
* Basic implementation of sparser bitmap, allowing the user to spread
* the bits over more cachelines.
*/
struct blk_align_bitmap {
unsigned long word;
unsigned long depth;
} ____cacheline_aligned_in_smp;
#endif #endif
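
struct blk_align_bitmap above (used by the blk_mq_ctxmap added in blk-mq.h, shown further down) spreads a bitmap's bits over cacheline-sized words so that CPUs touching different software queues dirty different cachelines. A self-contained userspace model of the idea; the sizes and helper names are assumptions, not kernel code:

```c
/* Userspace model of a "sparse" bitmap spread across cacheline-aligned words. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_CACHELINE 64

struct demo_align_bitmap {
	unsigned long word;
	unsigned long depth;		/* valid bits in this word */
} __attribute__((aligned(DEMO_CACHELINE)));

struct demo_ctx_map {
	unsigned int map_size;		/* number of words */
	unsigned int bits_per_word;
	struct demo_align_bitmap *map;
};

static void demo_map_init(struct demo_ctx_map *m, unsigned int nr_bits,
			  unsigned int bits_per_word)
{
	m->bits_per_word = bits_per_word;
	m->map_size = (nr_bits + bits_per_word - 1) / bits_per_word;
	m->map = aligned_alloc(DEMO_CACHELINE, m->map_size * sizeof(*m->map));
	for (unsigned int i = 0; i < m->map_size; i++) {
		m->map[i].word = 0;
		m->map[i].depth = (i == m->map_size - 1 && nr_bits % bits_per_word)
					? nr_bits % bits_per_word : bits_per_word;
	}
}

static void demo_set_bit(struct demo_ctx_map *m, unsigned int bit)
{
	m->map[bit / m->bits_per_word].word |= 1UL << (bit % m->bits_per_word);
}

static bool demo_test_bit(struct demo_ctx_map *m, unsigned int bit)
{
	return m->map[bit / m->bits_per_word].word &
	       (1UL << (bit % m->bits_per_word));
}

int main(void)
{
	struct demo_ctx_map m;

	demo_map_init(&m, 100, 8);	/* 100 bits, 8 per cacheline-aligned word */
	demo_set_bit(&m, 42);
	printf("bit 42 set: %d, bit 43 set: %d\n",
	       demo_test_bit(&m, 42), demo_test_bit(&m, 43));
	free(m.map);
	return 0;
}
```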
...@@ -48,11 +48,10 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page) ...@@ -48,11 +48,10 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page)
static ssize_t static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count) queue_requests_store(struct request_queue *q, const char *page, size_t count)
{ {
struct request_list *rl;
unsigned long nr; unsigned long nr;
int ret; int ret, err;
if (!q->request_fn) if (!q->request_fn && !q->mq_ops)
return -EINVAL; return -EINVAL;
ret = queue_var_store(&nr, page, count); ret = queue_var_store(&nr, page, count);
...@@ -62,40 +61,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) ...@@ -62,40 +61,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
if (nr < BLKDEV_MIN_RQ) if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ; nr = BLKDEV_MIN_RQ;
spin_lock_irq(q->queue_lock); if (q->request_fn)
q->nr_requests = nr; err = blk_update_nr_requests(q, nr);
blk_queue_congestion_threshold(q); else
err = blk_mq_update_nr_requests(q, nr);
/* congestion isn't cgroup aware and follows root blkcg for now */
rl = &q->root_rl;
if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
blk_set_queue_congested(q, BLK_RW_SYNC);
else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
blk_clear_queue_congested(q, BLK_RW_SYNC);
if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
blk_set_queue_congested(q, BLK_RW_ASYNC);
else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
blk_clear_queue_congested(q, BLK_RW_ASYNC);
blk_queue_for_each_rl(rl, q) {
if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_SYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_SYNC);
wake_up(&rl->wait[BLK_RW_SYNC]);
}
if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { if (err)
blk_set_rl_full(rl, BLK_RW_ASYNC); return err;
} else {
blk_clear_rl_full(rl, BLK_RW_ASYNC);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
}
spin_unlock_irq(q->queue_lock);
return ret; return ret;
} }
...@@ -544,8 +517,6 @@ static void blk_release_queue(struct kobject *kobj) ...@@ -544,8 +517,6 @@ static void blk_release_queue(struct kobject *kobj)
if (q->queue_tags) if (q->queue_tags)
__blk_queue_free_tags(q); __blk_queue_free_tags(q);
percpu_counter_destroy(&q->mq_usage_counter);
if (q->mq_ops) if (q->mq_ops)
blk_mq_free_queue(q); blk_mq_free_queue(q);
......
...@@ -744,7 +744,7 @@ static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, ...@@ -744,7 +744,7 @@ static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
static bool throtl_slice_used(struct throtl_grp *tg, bool rw) static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{ {
if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
return 0; return false;
return 1; return 1;
} }
...@@ -842,7 +842,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, ...@@ -842,7 +842,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
if (tg->io_disp[rw] + 1 <= io_allowed) { if (tg->io_disp[rw] + 1 <= io_allowed) {
if (wait) if (wait)
*wait = 0; *wait = 0;
return 1; return true;
} }
/* Calc approx time to dispatch */ /* Calc approx time to dispatch */
...@@ -880,7 +880,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, ...@@ -880,7 +880,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
if (wait) if (wait)
*wait = 0; *wait = 0;
return 1; return true;
} }
/* Calc approx time to dispatch */ /* Calc approx time to dispatch */
...@@ -923,7 +923,7 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, ...@@ -923,7 +923,7 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
if (tg->bps[rw] == -1 && tg->iops[rw] == -1) { if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
if (wait) if (wait)
*wait = 0; *wait = 0;
return 1; return true;
} }
/* /*
...@@ -1258,7 +1258,7 @@ static void throtl_pending_timer_fn(unsigned long arg) ...@@ -1258,7 +1258,7 @@ static void throtl_pending_timer_fn(unsigned long arg)
* of throtl_data->service_queue. Those bio's are ready and issued by this * of throtl_data->service_queue. Those bio's are ready and issued by this
* function. * function.
*/ */
void blk_throtl_dispatch_work_fn(struct work_struct *work) static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{ {
struct throtl_data *td = container_of(work, struct throtl_data, struct throtl_data *td = container_of(work, struct throtl_data,
dispatch_work); dispatch_work);
......
...@@ -96,11 +96,7 @@ static void blk_rq_timed_out(struct request *req) ...@@ -96,11 +96,7 @@ static void blk_rq_timed_out(struct request *req)
__blk_complete_request(req); __blk_complete_request(req);
break; break;
case BLK_EH_RESET_TIMER: case BLK_EH_RESET_TIMER:
if (q->mq_ops)
blk_mq_add_timer(req);
else
blk_add_timer(req); blk_add_timer(req);
blk_clear_rq_complete(req); blk_clear_rq_complete(req);
break; break;
case BLK_EH_NOT_HANDLED: case BLK_EH_NOT_HANDLED:
...@@ -170,7 +166,26 @@ void blk_abort_request(struct request *req) ...@@ -170,7 +166,26 @@ void blk_abort_request(struct request *req)
} }
EXPORT_SYMBOL_GPL(blk_abort_request); EXPORT_SYMBOL_GPL(blk_abort_request);
void __blk_add_timer(struct request *req, struct list_head *timeout_list) unsigned long blk_rq_timeout(unsigned long timeout)
{
unsigned long maxt;
maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
if (time_after(timeout, maxt))
timeout = maxt;
return timeout;
}
/**
* blk_add_timer - Start timeout timer for a single request
* @req: request that is about to start running.
*
* Notes:
* Each request has its own timer, and as it is added to the queue, we
* set up the timer. When the request completes, we cancel the timer.
*/
void blk_add_timer(struct request *req)
{ {
struct request_queue *q = req->q; struct request_queue *q = req->q;
unsigned long expiry; unsigned long expiry;
...@@ -188,15 +203,15 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list) ...@@ -188,15 +203,15 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list)
req->timeout = q->rq_timeout; req->timeout = q->rq_timeout;
req->deadline = jiffies + req->timeout; req->deadline = jiffies + req->timeout;
if (timeout_list) if (!q->mq_ops)
list_add_tail(&req->timeout_list, timeout_list); list_add_tail(&req->timeout_list, &req->q->timeout_list);
/* /*
* If the timer isn't already pending or this timeout is earlier * If the timer isn't already pending or this timeout is earlier
* than an existing one, modify the timer. Round up to next nearest * than an existing one, modify the timer. Round up to next nearest
* second. * second.
*/ */
expiry = round_jiffies_up(req->deadline); expiry = blk_rq_timeout(round_jiffies_up(req->deadline));
if (!timer_pending(&q->timeout) || if (!timer_pending(&q->timeout) ||
time_before(expiry, q->timeout.expires)) { time_before(expiry, q->timeout.expires)) {
...@@ -214,17 +229,3 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list) ...@@ -214,17 +229,3 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list)
} }
} }
/**
* blk_add_timer - Start timeout timer for a single request
* @req: request that is about to start running.
*
* Notes:
* Each request has its own timer, and as it is added to the queue, we
* set up the timer. When the request completes, we cancel the timer.
*/
void blk_add_timer(struct request *req)
{
__blk_add_timer(req, &req->q->timeout_list);
}
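
blk_rq_timeout() above caps how far into the future the queue's timeout timer may be armed (BLK_MAX_TIMEOUT, defined as 5 * HZ in the blk.h hunk below), and blk_add_timer() now feeds the rounded per-request deadline through it. A self-contained userspace model of that capping arithmetic; the second-granularity rounding is a simplified stand-in for round_jiffies_up():

```c
/* Userspace model of capping a timer expiry at "now + 5 seconds, rounded up". */
#include <stdio.h>

#define DEMO_HZ          100UL            /* ticks per second */
#define DEMO_MAX_TIMEOUT (5 * DEMO_HZ)    /* mirrors BLK_MAX_TIMEOUT = 5 * HZ */

/* Round a tick count up to the next whole second (stand-in for round_jiffies_up). */
static unsigned long demo_round_up_second(unsigned long ticks)
{
	return ((ticks + DEMO_HZ - 1) / DEMO_HZ) * DEMO_HZ;
}

/* Never arm the timer further out than now + DEMO_MAX_TIMEOUT. */
static unsigned long demo_rq_timeout(unsigned long now, unsigned long timeout)
{
	unsigned long maxt = demo_round_up_second(now + DEMO_MAX_TIMEOUT);

	return (timeout > maxt) ? maxt : timeout;
}

int main(void)
{
	unsigned long now = 1000;                     /* arbitrary current tick */
	unsigned long deadline = now + 30 * DEMO_HZ;  /* request asks for 30s   */
	unsigned long expiry;

	/* Same shape as: blk_rq_timeout(round_jiffies_up(req->deadline)) */
	expiry = demo_rq_timeout(now, demo_round_up_second(deadline));
	printf("deadline=%lu, armed expiry=%lu (capped near now+5s)\n",
	       deadline, expiry);
	return 0;
}
```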
...@@ -9,6 +9,9 @@ ...@@ -9,6 +9,9 @@
/* Number of requests a "batching" process may submit */ /* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ 32 #define BLK_BATCH_REQ 32
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
extern struct kmem_cache *blk_requestq_cachep; extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep; extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype; extern struct kobj_type blk_queue_ktype;
...@@ -37,9 +40,9 @@ bool __blk_end_bidi_request(struct request *rq, int error, ...@@ -37,9 +40,9 @@ bool __blk_end_bidi_request(struct request *rq, int error,
void blk_rq_timed_out_timer(unsigned long data); void blk_rq_timed_out_timer(unsigned long data);
void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
unsigned int *next_set); unsigned int *next_set);
void __blk_add_timer(struct request *req, struct list_head *timeout_list); unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *); void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
bool bio_attempt_front_merge(struct request_queue *q, struct request *req, bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
...@@ -185,6 +188,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q) ...@@ -185,6 +188,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
return q->nr_congestion_off; return q->nr_congestion_off;
} }
extern int blk_update_nr_requests(struct request_queue *, unsigned int);
/* /*
* Contribute to IO statistics IFF: * Contribute to IO statistics IFF:
* *
......
...@@ -4460,7 +4460,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e) ...@@ -4460,7 +4460,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
static ssize_t static ssize_t
cfq_var_show(unsigned int var, char *page) cfq_var_show(unsigned int var, char *page)
{ {
return sprintf(page, "%d\n", var); return sprintf(page, "%u\n", var);
} }
static ssize_t static ssize_t
......
...@@ -178,7 +178,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd) ...@@ -178,7 +178,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
{ {
struct request *rq; struct request *rq;
rq = blk_mq_alloc_reserved_request(dd->queue, 0, __GFP_WAIT); rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true);
return blk_mq_rq_to_pdu(rq); return blk_mq_rq_to_pdu(rq);
} }
......
...@@ -322,39 +322,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) ...@@ -322,39 +322,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
} }
static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set, static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set,
unsigned int hctx_index) unsigned int hctx_index,
int node)
{ {
int b_size = DIV_ROUND_UP(set->nr_hw_queues, nr_online_nodes); return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
int tip = (set->nr_hw_queues % nr_online_nodes);
int node = 0, i, n;
/*
* Split submit queues evenly wrt to the number of nodes. If uneven,
* fill the first buckets with one extra, until the rest is filled with
* no extra.
*/
for (i = 0, n = 1; i < hctx_index; i++, n++) {
if (n % b_size == 0) {
n = 0;
node++;
tip--;
if (!tip)
b_size = set->nr_hw_queues / nr_online_nodes;
}
}
/*
* A node might not be online, therefore map the relative node id to the
* real node id.
*/
for_each_online_node(n) {
if (!node)
break;
node--;
}
return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
} }
static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
......
...@@ -743,6 +743,7 @@ static void skd_request_fn(struct request_queue *q) ...@@ -743,6 +743,7 @@ static void skd_request_fn(struct request_queue *q)
break; break;
} }
skreq->discard_page = 1; skreq->discard_page = 1;
req->completion_data = page;
skd_prep_discard_cdb(scsi_req, skreq, page, lba, count); skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
} else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) { } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
...@@ -855,10 +856,9 @@ static void skd_end_request(struct skd_device *skdev, ...@@ -855,10 +856,9 @@ static void skd_end_request(struct skd_device *skdev,
if ((io_flags & REQ_DISCARD) && if ((io_flags & REQ_DISCARD) &&
(skreq->discard_page == 1)) { (skreq->discard_page == 1)) {
struct bio *bio = req->bio;
pr_debug("%s:%s:%d, free the page!", pr_debug("%s:%s:%d, free the page!",
skdev->name, __func__, __LINE__); skdev->name, __func__, __LINE__);
__free_page(bio->bi_io_vec->bv_page); __free_page(req->completion_data);
} }
if (unlikely(error)) { if (unlikely(error)) {
......
...@@ -902,6 +902,7 @@ void add_disk_randomness(struct gendisk *disk) ...@@ -902,6 +902,7 @@ void add_disk_randomness(struct gendisk *disk)
add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool)); trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
} }
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif #endif
/********************************************************************* /*********************************************************************
......
...@@ -737,6 +737,7 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) ...@@ -737,6 +737,7 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
goto out; goto out;
} }
rq->completion_data = page;
blk_add_request_payload(rq, page, len); blk_add_request_payload(rq, page, len);
ret = scsi_setup_blk_pc_cmnd(sdp, rq); ret = scsi_setup_blk_pc_cmnd(sdp, rq);
rq->__data_len = nr_bytes; rq->__data_len = nr_bytes;
...@@ -839,11 +840,9 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq) ...@@ -839,11 +840,9 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
{ {
struct scsi_cmnd *SCpnt = rq->special; struct scsi_cmnd *SCpnt = rq->special;
if (rq->cmd_flags & REQ_DISCARD) { if (rq->cmd_flags & REQ_DISCARD)
struct bio *bio = rq->bio; __free_page(rq->completion_data);
__free_page(bio->bi_io_vec->bv_page);
}
if (SCpnt->cmnd != rq->cmd) { if (SCpnt->cmnd != rq->cmd) {
mempool_free(SCpnt->cmnd, sd_cdb_pool); mempool_free(SCpnt->cmnd, sd_cdb_pool);
SCpnt->cmnd = NULL; SCpnt->cmnd = NULL;
......
...@@ -14,14 +14,13 @@ obj-y := open.o read_write.o file_table.o super.o \ ...@@ -14,14 +14,13 @@ obj-y := open.o read_write.o file_table.o super.o \
stack.o fs_struct.o statfs.o stack.o fs_struct.o statfs.o
ifeq ($(CONFIG_BLOCK),y) ifeq ($(CONFIG_BLOCK),y)
obj-y += buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o obj-y += buffer.o block_dev.o direct-io.o mpage.o
else else
obj-y += no-block.o obj-y += no-block.o
endif endif
obj-$(CONFIG_PROC_FS) += proc_namespace.o obj-$(CONFIG_PROC_FS) += proc_namespace.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o
obj-y += notify/ obj-y += notify/
obj-$(CONFIG_EPOLL) += eventpoll.o obj-$(CONFIG_EPOLL) += eventpoll.o
obj-$(CONFIG_ANON_INODES) += anon_inodes.o obj-$(CONFIG_ANON_INODES) += anon_inodes.o
......
...@@ -333,7 +333,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors, ...@@ -333,7 +333,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
extern struct bio_set *bioset_create(unsigned int, unsigned int); extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *); extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries); extern mempool_t *biovec_create_pool(int pool_entries);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *); extern void bio_put(struct bio *);
......
...@@ -8,7 +8,13 @@ struct blk_mq_tags; ...@@ -8,7 +8,13 @@ struct blk_mq_tags;
struct blk_mq_cpu_notifier { struct blk_mq_cpu_notifier {
struct list_head list; struct list_head list;
void *data; void *data;
void (*notify)(void *data, unsigned long action, unsigned int cpu); int (*notify)(void *data, unsigned long action, unsigned int cpu);
};
struct blk_mq_ctxmap {
unsigned int map_size;
unsigned int bits_per_word;
struct blk_align_bitmap *map;
}; };
struct blk_mq_hw_ctx { struct blk_mq_hw_ctx {
...@@ -21,6 +27,8 @@ struct blk_mq_hw_ctx { ...@@ -21,6 +27,8 @@ struct blk_mq_hw_ctx {
struct delayed_work run_work; struct delayed_work run_work;
struct delayed_work delay_work; struct delayed_work delay_work;
cpumask_var_t cpumask; cpumask_var_t cpumask;
int next_cpu;
int next_cpu_batch;
unsigned long flags; /* BLK_MQ_F_* flags */ unsigned long flags; /* BLK_MQ_F_* flags */
...@@ -29,10 +37,12 @@ struct blk_mq_hw_ctx { ...@@ -29,10 +37,12 @@ struct blk_mq_hw_ctx {
void *driver_data; void *driver_data;
struct blk_mq_ctxmap ctx_map;
unsigned int nr_ctx; unsigned int nr_ctx;
struct blk_mq_ctx **ctxs; struct blk_mq_ctx **ctxs;
unsigned int nr_ctx_map;
unsigned long *ctx_map; unsigned int wait_index;
struct blk_mq_tags *tags; struct blk_mq_tags *tags;
...@@ -44,6 +54,8 @@ struct blk_mq_hw_ctx { ...@@ -44,6 +54,8 @@ struct blk_mq_hw_ctx {
unsigned int numa_node; unsigned int numa_node;
unsigned int cmd_size; /* per-request extra data */ unsigned int cmd_size; /* per-request extra data */
atomic_t nr_active;
struct blk_mq_cpu_notifier cpu_notifier; struct blk_mq_cpu_notifier cpu_notifier;
struct kobject kobj; struct kobject kobj;
}; };
...@@ -51,7 +63,7 @@ struct blk_mq_hw_ctx { ...@@ -51,7 +63,7 @@ struct blk_mq_hw_ctx {
struct blk_mq_tag_set { struct blk_mq_tag_set {
struct blk_mq_ops *ops; struct blk_mq_ops *ops;
unsigned int nr_hw_queues; unsigned int nr_hw_queues;
unsigned int queue_depth; unsigned int queue_depth; /* max hw supported */
unsigned int reserved_tags; unsigned int reserved_tags;
unsigned int cmd_size; /* per-request extra data */ unsigned int cmd_size; /* per-request extra data */
int numa_node; int numa_node;
...@@ -60,12 +72,15 @@ struct blk_mq_tag_set { ...@@ -60,12 +72,15 @@ struct blk_mq_tag_set {
void *driver_data; void *driver_data;
struct blk_mq_tags **tags; struct blk_mq_tags **tags;
struct mutex tag_list_lock;
struct list_head tag_list;
}; };
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *, typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
unsigned int); unsigned int, int);
typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
...@@ -122,11 +137,14 @@ enum { ...@@ -122,11 +137,14 @@ enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_SHOULD_SORT = 1 << 1, BLK_MQ_F_SHOULD_SORT = 1 << 1,
BLK_MQ_F_SHOULD_IPI = 1 << 2, BLK_MQ_F_TAG_SHARED = 1 << 2,
BLK_MQ_S_STOPPED = 0, BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
BLK_MQ_MAX_DEPTH = 2048, BLK_MQ_MAX_DEPTH = 2048,
BLK_MQ_CPU_WORK_BATCH = 8,
}; };
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
...@@ -142,19 +160,20 @@ void blk_mq_insert_request(struct request *, bool, bool, bool); ...@@ -142,19 +160,20 @@ void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async); void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq); void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *); bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp); struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp); gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
void blk_mq_end_io(struct request *rq, int error); void blk_mq_end_io(struct request *rq, int error);
void __blk_mq_end_io(struct request *rq, int error); void __blk_mq_end_io(struct request *rq, int error);
void blk_mq_requeue_request(struct request *rq); void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq); void blk_mq_complete_request(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
...@@ -163,6 +182,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q); ...@@ -163,6 +182,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
/* /*
* Driver command data is immediately after the request. So subtract request * Driver command data is immediately after the request. So subtract request
......
...@@ -190,6 +190,7 @@ enum rq_flag_bits { ...@@ -190,6 +190,7 @@ enum rq_flag_bits {
__REQ_PM, /* runtime pm request */ __REQ_PM, /* runtime pm request */
__REQ_END, /* last of chain of requests */ __REQ_END, /* last of chain of requests */
__REQ_HASHED, /* on IO scheduler merge hash */ __REQ_HASHED, /* on IO scheduler merge hash */
__REQ_MQ_INFLIGHT, /* track inflight for MQ */
__REQ_NR_BITS, /* stops here */ __REQ_NR_BITS, /* stops here */
}; };
...@@ -243,5 +244,6 @@ enum rq_flag_bits { ...@@ -243,5 +244,6 @@ enum rq_flag_bits {
#define REQ_PM (1ULL << __REQ_PM) #define REQ_PM (1ULL << __REQ_PM)
#define REQ_END (1ULL << __REQ_END) #define REQ_END (1ULL << __REQ_END)
#define REQ_HASHED (1ULL << __REQ_HASHED) #define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
#endif /* __LINUX_BLK_TYPES_H */ #endif /* __LINUX_BLK_TYPES_H */
...@@ -90,15 +90,15 @@ enum rq_cmd_type_bits { ...@@ -90,15 +90,15 @@ enum rq_cmd_type_bits {
#define BLK_MAX_CDB 16 #define BLK_MAX_CDB 16
/* /*
* try to put the fields that are referenced together in the same cacheline. * Try to put the fields that are referenced together in the same cacheline.
* if you modify this structure, be sure to check block/blk-core.c:blk_rq_init() *
* as well! * If you modify this structure, make sure to update blk_rq_init() and
* especially blk_mq_rq_ctx_init() to take care of the added fields.
*/ */
struct request { struct request {
struct list_head queuelist; struct list_head queuelist;
union { union {
struct call_single_data csd; struct call_single_data csd;
struct work_struct requeue_work;
unsigned long fifo_time; unsigned long fifo_time;
}; };
...@@ -462,6 +462,10 @@ struct request_queue { ...@@ -462,6 +462,10 @@ struct request_queue {
struct request *flush_rq; struct request *flush_rq;
spinlock_t mq_flush_lock; spinlock_t mq_flush_lock;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct work_struct requeue_work;
struct mutex sysfs_lock; struct mutex sysfs_lock;
int bypass_depth; int bypass_depth;
...@@ -480,6 +484,9 @@ struct request_queue { ...@@ -480,6 +484,9 @@ struct request_queue {
wait_queue_head_t mq_freeze_wq; wait_queue_head_t mq_freeze_wq;
struct percpu_counter mq_usage_counter; struct percpu_counter mq_usage_counter;
struct list_head all_q_node; struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
}; };
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
......
...@@ -30,7 +30,6 @@ endif ...@@ -30,7 +30,6 @@ endif
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
obj-$(CONFIG_FRONTSWAP) += frontswap.o obj-$(CONFIG_FRONTSWAP) += frontswap.o
obj-$(CONFIG_ZSWAP) += zswap.o obj-$(CONFIG_ZSWAP) += zswap.o
......