Commit b3c9dd18 authored by Linus Torvalds

Merge branch 'for-3.3/core' of git://git.kernel.dk/linux-block

* 'for-3.3/core' of git://git.kernel.dk/linux-block: (37 commits)
  Revert "block: recursive merge requests"
  block: Stop using macro stubs for the bio data integrity calls
  blockdev: convert some macros to static inlines
  fs: remove unneeded plug in mpage_readpages()
  block: Add BLKROTATIONAL ioctl
  block: Introduce blk_set_stacking_limits function
  block: remove WARN_ON_ONCE() in exit_io_context()
  block: an exiting task should be allowed to create io_context
  block: ioc_cgroup_changed() needs to be exported
  block: recursive merge requests
  block, cfq: fix empty queue crash caused by request merge
  block, cfq: move icq creation and rq->elv.icq association to block core
  block, cfq: restructure io_cq creation path for io_context interface cleanup
  block, cfq: move io_cq exit/release to blk-ioc.c
  block, cfq: move icq cache management to block core
  block, cfq: move io_cq lookup to blk-ioc.c
  block, cfq: move cfqd->icq_list to request_queue and add request->elv.icq
  block, cfq: reorganize cfq_io_context into generic and cfq specific parts
  block: remove elevator_queue->ops
  block: reorder elevator switch sequence
  ...

Fix up conflicts in:
 - block/blk-cgroup.c
	Switch from can_attach_task to can_attach
 - block/cfq-iosched.c
	conflict with now removed cic index changes (we now use q->id instead)
parents 83c2f912 5d381efb
@@ -1655,11 +1655,12 @@ static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	struct io_context *ioc;
 	cgroup_taskset_for_each(task, cgrp, tset) {
-		task_lock(task);
-		ioc = task->io_context;
-		if (ioc)
-			ioc->cgroup_changed = 1;
-		task_unlock(task);
+		/* we don't lose anything even if ioc allocation fails */
+		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
+		if (ioc) {
+			ioc_cgroup_changed(ioc);
+			put_io_context(ioc, NULL);
+		}
 	}
 }
...
This diff is collapsed.
@@ -50,7 +50,11 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+	WARN_ON(irqs_disabled());
+	spin_lock_irq(q->queue_lock);
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irq(q->queue_lock);
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
@@ -59,8 +63,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
-	WARN_ON(irqs_disabled());
-	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
...
This diff is collapsed.
@@ -104,9 +104,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
  * @lim:  the queue_limits structure to reset
  *
  * Description:
- *   Returns a queue_limit struct to its default state.  Can be used by
- *   stacking drivers like DM that stage table swaps and reuse an
- *   existing device queue.
+ *   Returns a queue_limit struct to its default state.
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
@@ -114,13 +112,12 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->max_integrity_segments = 0;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-	lim->max_sectors = BLK_DEF_MAX_SECTORS;
-	lim->max_hw_sectors = INT_MAX;
+	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
 	lim->max_discard_sectors = 0;
 	lim->discard_granularity = 0;
 	lim->discard_alignment = 0;
 	lim->discard_misaligned = 0;
-	lim->discard_zeroes_data = 1;
+	lim->discard_zeroes_data = 0;
 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
 	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
 	lim->alignment_offset = 0;
@@ -130,6 +127,27 @@ void blk_set_default_limits(struct queue_limits *lim)
 }
 EXPORT_SYMBOL(blk_set_default_limits);
+/**
+ * blk_set_stacking_limits - set default limits for stacking devices
+ * @lim:  the queue_limits structure to reset
+ *
+ * Description:
+ *   Returns a queue_limit struct to its default state. Should be used
+ *   by stacking drivers like DM that have no internal limits.
+ */
+void blk_set_stacking_limits(struct queue_limits *lim)
+{
+	blk_set_default_limits(lim);
+	/* Inherit limits from component devices */
+	lim->discard_zeroes_data = 1;
+	lim->max_segments = USHRT_MAX;
+	lim->max_hw_sectors = UINT_MAX;
+	lim->max_sectors = BLK_DEF_MAX_SECTORS;
+}
+EXPORT_SYMBOL(blk_set_stacking_limits);
 /**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q:  the request queue for the device to be affected
@@ -165,8 +183,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->nr_batching = BLK_BATCH_REQ;
 	blk_set_default_limits(&q->limits);
-	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
-	q->limits.discard_zeroes_data = 0;
 	/*
 	 * by default assume old behaviour and bounce for any highmem page
...
@@ -425,7 +425,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 	if (!entry->show)
 		return -EIO;
 	mutex_lock(&q->sysfs_lock);
-	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+	if (blk_queue_dead(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
@@ -447,7 +447,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
-	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+	if (blk_queue_dead(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
@@ -479,8 +479,12 @@ static void blk_release_queue(struct kobject *kobj)
 	blk_sync_queue(q);
-	if (q->elevator)
+	if (q->elevator) {
+		spin_lock_irq(q->queue_lock);
+		ioc_clear_queue(q);
+		spin_unlock_irq(q->queue_lock);
 		elevator_exit(q->elevator);
+	}
 	blk_throtl_exit(q);
@@ -494,6 +498,8 @@ static void blk_release_queue(struct kobject *kobj)
 	blk_trace_shutdown(q);
 	bdi_destroy(&q->backing_dev_info);
+	ida_simple_remove(&blk_queue_ida, q->id);
 	kmem_cache_free(blk_requestq_cachep, q);
 }
...
@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	struct request_queue *q = td->queue;
 	/* no throttling for dead queue */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+	if (unlikely(blk_queue_dead(q)))
 		return NULL;
 	rcu_read_lock();
@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	spin_lock_irq(q->queue_lock);
 	/* Make sure @q is still alive */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+	if (unlikely(blk_queue_dead(q))) {
 		kfree(tg);
 		return NULL;
 	}
...
 #ifndef BLK_INTERNAL_H
 #define BLK_INTERNAL_H
+#include <linux/idr.h>
 /* Amount of time in which a process may batch requests */
 #define BLK_BATCH_TIME	(HZ/50UL)
@@ -9,6 +11,12 @@
 extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
+extern struct ida blk_queue_ida;
+static inline void __blk_get_queue(struct request_queue *q)
+{
+	kobject_get(&q->kobj);
+}
 void init_request_from_bio(struct request *req, struct bio *bio);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
@@ -85,8 +93,8 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 			q->flush_queue_delayed = 1;
 			return NULL;
 		}
-		if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
-		    !q->elevator->ops->elevator_dispatch_fn(q, 0))
+		if (unlikely(blk_queue_dead(q)) ||
+		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
 			return NULL;
 	}
 }
@@ -95,16 +103,16 @@ static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
-	if (e->ops->elevator_activate_req_fn)
-		e->ops->elevator_activate_req_fn(q, rq);
+	if (e->type->ops.elevator_activate_req_fn)
+		e->type->ops.elevator_activate_req_fn(q, rq);
 }
 static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
+	if (e->type->ops.elevator_deactivate_req_fn)
+		e->type->ops.elevator_deactivate_req_fn(q, rq);
 }
 #ifdef CONFIG_FAIL_IO_TIMEOUT
@@ -119,8 +127,6 @@ static inline int blk_should_fake_timeout(struct request_queue *q)
 }
 #endif
-struct io_context *current_io_context(gfp_t gfp_flags, int node);
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio);
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
@@ -189,6 +195,42 @@ static inline int blk_do_io_stat(struct request *rq)
 		(rq->cmd_flags & REQ_DISCARD));
 }
+/*
+ * Internal io_context interface
+ */
+void get_io_context(struct io_context *ioc);
+struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
+struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+void ioc_clear_queue(struct request_queue *q);
+void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
+				int node);
+/**
+ * create_io_context - try to create task->io_context
+ * @task: target task
+ * @gfp_mask: allocation mask
+ * @node: allocation node
+ *
+ * If @task->io_context is %NULL, allocate a new io_context and install it.
+ * Returns the current @task->io_context which may be %NULL if allocation
+ * failed.
+ *
+ * Note that this function can't be called with IRQ disabled because
+ * task_lock which protects @task->io_context is IRQ-unsafe.
+ */
+static inline struct io_context *create_io_context(struct task_struct *task,
+						    gfp_t gfp_mask, int node)
+{
+	WARN_ON_ONCE(irqs_disabled());
+	if (unlikely(!task->io_context))
+		create_io_context_slowpath(task, gfp_mask, node);
+	return task->io_context;
+}
+/*
+ * Internal throttling interface
+ */
 #ifdef CONFIG_BLK_DEV_THROTTLING
 extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern void blk_throtl_drain(struct request_queue *q);
...
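For reference, a minimal caller sketch for the create_io_context() helper documented above; the wrapper name example_get_current_ioc() is illustrative and not part of this commit, and it assumes the caller runs with IRQs enabled, as the kernel-doc requires.

/*
 * Hypothetical caller sketch: make sure the current task has an io_context
 * before request allocation.  create_io_context() may return NULL if the
 * slowpath allocation fails, so callers must handle that case.
 */
static struct io_context *example_get_current_ioc(gfp_t gfp_mask)
{
	struct io_context *ioc;

	ioc = create_io_context(current, gfp_mask, NUMA_NO_NODE);
	if (!ioc)
		return NULL;	/* allocation failed; caller falls back */
	return ioc;
}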
@@ -769,12 +769,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 					struct file *file)
 {
 	struct bsg_device *bd;
-	int ret;
 #ifdef BSG_DEBUG
 	unsigned char buf[32];
 #endif
-	ret = blk_get_queue(rq);
-	if (ret)
+	if (!blk_get_queue(rq))
 		return ERR_PTR(-ENXIO);
 	bd = bsg_alloc_device();
...
This diff is collapsed.
@@ -719,6 +719,9 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKSECTGET:
 		return compat_put_ushort(arg,
 					 queue_max_sectors(bdev_get_queue(bdev)));
+	case BLKROTATIONAL:
+		return compat_put_ushort(arg,
+					 !blk_queue_nonrot(bdev_get_queue(bdev)));
 	case BLKRASET: /* compatible, but no compat_ptr (!) */
 	case BLKFRASET:
 		if (!capable(CAP_SYS_ADMIN))
...
@@ -448,9 +448,7 @@ static struct elevator_type iosched_deadline = {
 static int __init deadline_init(void)
 {
-	elv_register(&iosched_deadline);
-	return 0;
+	return elv_register(&iosched_deadline);
 }
 static void __exit deadline_exit(void)
...
This diff is collapsed.
@@ -614,7 +614,7 @@ void add_disk(struct gendisk *disk)
 	 * Take an extra ref on queue which will be put on disk_release()
 	 * so that it sticks around as long as @disk is there.
 	 */
-	WARN_ON_ONCE(blk_get_queue(disk->queue));
+	WARN_ON_ONCE(!blk_get_queue(disk->queue));
 	retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
 				   "bdi");
...
@@ -296,6 +296,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		return put_uint(arg, bdev_discard_zeroes_data(bdev));
 	case BLKSECTGET:
 		return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
+	case BLKROTATIONAL:
+		return put_ushort(arg, !blk_queue_nonrot(bdev_get_queue(bdev)));
 	case BLKRASET:
 	case BLKFRASET:
 		if(!capable(CAP_SYS_ADMIN))
...
@@ -94,9 +94,7 @@ static struct elevator_type elevator_noop = {
 static int __init noop_init(void)
 {
-	elv_register(&elevator_noop);
-	return 0;
+	return elv_register(&elevator_noop);
 }
 static void __exit noop_exit(void)
...
@@ -619,8 +619,10 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
 		       host->state == HST_DEV_SCAN);
 	spin_unlock_irq(&host->lock);
-	DPRINTK("blk_insert_request, tag == %u\n", idx);
-	blk_insert_request(host->oob_q, crq->rq, 1, crq);
+	DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
+	crq->rq->cmd_type = REQ_TYPE_SPECIAL;
+	crq->rq->special = crq;
+	blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
 	return 0;
@@ -658,8 +660,10 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
 	BUG_ON(rc < 0);
 	crq->msg_bucket = (u32) rc;
-	DPRINTK("blk_insert_request, tag == %u\n", idx);
-	blk_insert_request(host->oob_q, crq->rq, 1, crq);
+	DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
+	crq->rq->cmd_type = REQ_TYPE_SPECIAL;
+	crq->rq->special = crq;
+	blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
 	return 0;
 }
...
@@ -699,7 +699,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
 	while (i < dm_table_get_num_targets(table)) {
 		ti = dm_table_get_target(table, i++);
-		blk_set_default_limits(&ti_limits);
+		blk_set_stacking_limits(&ti_limits);
 		/* combine all target devices' limits */
 		if (ti->type->iterate_devices)
@@ -1221,10 +1221,10 @@ int dm_calculate_queue_limits(struct dm_table *table,
 	struct queue_limits ti_limits;
 	unsigned i = 0;
-	blk_set_default_limits(limits);
+	blk_set_stacking_limits(limits);
 	while (i < dm_table_get_num_targets(table)) {
-		blk_set_default_limits(&ti_limits);
+		blk_set_stacking_limits(&ti_limits);
 		ti = dm_table_get_target(table, i++);
...
@@ -4666,6 +4666,7 @@ static int md_alloc(dev_t dev, char *name)
 	mddev->queue->queuedata = mddev;
 	blk_queue_make_request(mddev->queue, md_make_request);
+	blk_set_stacking_limits(&mddev->queue->limits);
 	disk = alloc_disk(1 << shift);
 	if (!disk) {
...
@@ -297,7 +297,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 		kfree(sdev);
 		goto out;
 	}
-	blk_get_queue(sdev->request_queue);
+	WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
 	sdev->request_queue->queuedata = sdev;
 	scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
...
@@ -48,28 +48,12 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 	if (err)
 		return err;
-	task_lock(task);
-	do {
-		ioc = task->io_context;
-		/* see wmb() in current_io_context() */
-		smp_read_barrier_depends();
-		if (ioc)
-			break;
-		ioc = alloc_io_context(GFP_ATOMIC, -1);
-		if (!ioc) {
-			err = -ENOMEM;
-			break;
-		}
-		task->io_context = ioc;
-	} while (1);
-	if (!err) {
-		ioc->ioprio = ioprio;
-		ioc->ioprio_changed = 1;
+	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
+	if (ioc) {
+		ioc_ioprio_changed(ioc, ioprio);
+		put_io_context(ioc, NULL);
 	}
-	task_unlock(task);
 	return err;
 }
 EXPORT_SYMBOL_GPL(set_task_ioprio);
...
@@ -371,9 +371,6 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 	sector_t last_block_in_bio = 0;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
-	struct blk_plug plug;
-	blk_start_plug(&plug);
 	map_bh.b_state = 0;
 	map_bh.b_size = 0;
@@ -395,7 +392,6 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 	BUG_ON(!list_empty(pages));
 	if (bio)
 		mpage_bio_submit(READ, bio);
-	blk_finish_plug(&plug);
 	return 0;
 }
 EXPORT_SYMBOL(mpage_readpages);
...
@@ -515,24 +515,64 @@ extern void bio_integrity_init(void);
 #else /* CONFIG_BLK_DEV_INTEGRITY */
-#define bio_integrity(a)		(0)
-#define bioset_integrity_create(a, b)	(0)
-#define bio_integrity_prep(a)		(0)
-#define bio_integrity_enabled(a)	(0)
+static inline int bio_integrity(struct bio *bio)
+{
+	return 0;
+}
+static inline int bio_integrity_enabled(struct bio *bio)
+{
+	return 0;
+}
+static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
+{
+	return 0;
+}
+static inline void bioset_integrity_free (struct bio_set *bs)
+{
+	return;
+}
+static inline int bio_integrity_prep(struct bio *bio)
+{
+	return 0;
+}
+static inline void bio_integrity_free(struct bio *bio, struct bio_set *bs)
+{
+	return;
+}
 static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
 				      gfp_t gfp_mask, struct bio_set *bs)
 {
 	return 0;
 }
-#define bioset_integrity_free(a)	do { } while (0)
-#define bio_integrity_free(a, b)	do { } while (0)
-#define bio_integrity_endio(a, b)	do { } while (0)
-#define bio_integrity_advance(a, b)	do { } while (0)
-#define bio_integrity_trim(a, b, c)	do { } while (0)
-#define bio_integrity_split(a, b, c)	do { } while (0)
-#define bio_integrity_set_tag(a, b, c)	do { } while (0)
-#define bio_integrity_get_tag(a, b, c)	do { } while (0)
-#define bio_integrity_init(a)		do { } while (0)
+static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
+				       int sectors)
+{
+	return;
+}
+static inline void bio_integrity_advance(struct bio *bio,
+					 unsigned int bytes_done)
+{
+	return;
+}
+static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
+				      unsigned int sectors)
+{
+	return;
+}
+static inline void bio_integrity_init(void)
+{
+	return;
+}
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
...
@@ -111,10 +111,14 @@ struct request {
 	 * Three pointers are available for the IO schedulers, if they need
 	 * more they have to dynamically allocate it.  Flush requests are
 	 * never put on the IO scheduler. So let the flush fields share
-	 * space with the three elevator_private pointers.
+	 * space with the elevator data.
 	 */
 	union {
-		void *elevator_private[3];
+		struct {
+			struct io_cq		*icq;
+			void			*priv[2];
+		} elv;
 		struct {
 			unsigned int		seq;
 			struct list_head	list;
@@ -310,6 +314,12 @@ struct request_queue {
 	 */
 	unsigned long		queue_flags;
+	/*
+	 * ida allocated id for this queue.  Used to index queues from
+	 * ioctx.
+	 */
+	int			id;
 	/*
 	 * queue needs bounce pages for pages above this limit
 	 */
@@ -351,6 +361,8 @@ struct request_queue {
 	struct timer_list	timeout;
 	struct list_head	timeout_list;
+	struct list_head	icq_list;
 	struct queue_limits	limits;
 	/*
@@ -387,6 +399,9 @@ struct request_queue {
 	/* Throttle data */
 	struct throtl_data *td;
 #endif
+#ifdef CONFIG_LOCKDEP
+	int			ioc_release_depth;
+#endif
 };
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -481,6 +496,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
@@ -660,7 +676,6 @@ extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern struct request *blk_make_request(struct request_queue *, struct bio *,
 					gfp_t);
-extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
 		unsigned int len);
@@ -829,6 +844,7 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
+extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
@@ -859,7 +875,7 @@ extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatte
 extern void blk_dump_rq_flags(struct request *, char *);
 extern long nr_blockdev_pages(void);
-int blk_get_queue(struct request_queue *);
+bool __must_check blk_get_queue(struct request_queue *);
 struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
@@ -1282,19 +1298,70 @@ queue_max_integrity_segments(struct request_queue *q)
 #else /* CONFIG_BLK_DEV_INTEGRITY */
-#define blk_integrity_rq(rq)			(0)
-#define blk_rq_count_integrity_sg(a, b)		(0)
-#define blk_rq_map_integrity_sg(a, b, c)	(0)
-#define bdev_get_integrity(a)			(0)
-#define blk_get_integrity(a)			(0)
-#define blk_integrity_compare(a, b)		(0)
-#define blk_integrity_register(a, b)		(0)
-#define blk_integrity_unregister(a)		do { } while (0)
-#define blk_queue_max_integrity_segments(a, b)	do { } while (0)
-#define queue_max_integrity_segments(a)		(0)
-#define blk_integrity_merge_rq(a, b, c)		(0)
-#define blk_integrity_merge_bio(a, b, c)	(0)
-#define blk_integrity_is_initialized(a)		(0)
+struct bio;
+struct block_device;
+struct gendisk;
+struct blk_integrity;
+static inline int blk_integrity_rq(struct request *rq)
+{
+	return 0;
+}
+static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+					    struct bio *b)
+{
+	return 0;
+}
+static inline int blk_rq_map_integrity_sg(struct request_queue *q,
+					  struct bio *b,
+					  struct scatterlist *s)
+{
+	return 0;
+}
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
+{
+	return 0;
+}
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+	return NULL;
+}
+static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+{
+	return 0;
+}
+static inline int blk_integrity_register(struct gendisk *d,
+					 struct blk_integrity *b)
+{
+	return 0;
+}
+static inline void blk_integrity_unregister(struct gendisk *d)
+{
+}
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+						    unsigned int segs)
+{
+}
+static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
+{
+	return 0;
+}
+static inline int blk_integrity_merge_rq(struct request_queue *rq,
+					 struct request *r1,
+					 struct request *r2)
+{
+	return 0;
+}
+static inline int blk_integrity_merge_bio(struct request_queue *rq,
+					  struct request *r,
+					  struct bio *b)
+{
+	return 0;
+}
+static inline bool blk_integrity_is_initialized(struct gendisk *g)
+{
+	return 0;
+}
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
...
@@ -5,6 +5,8 @@
 #ifdef CONFIG_BLOCK
+struct io_cq;
 typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
 				 struct bio *);
@@ -24,6 +26,8 @@ typedef struct request *(elevator_request_list_fn) (struct request_queue *, stru
 typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
 typedef int (elevator_may_queue_fn) (struct request_queue *, int);
+typedef void (elevator_init_icq_fn) (struct io_cq *);
+typedef void (elevator_exit_icq_fn) (struct io_cq *);
 typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
 typedef void (elevator_put_req_fn) (struct request *);
 typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
@@ -56,6 +60,9 @@ struct elevator_ops
 	elevator_request_list_fn *elevator_former_req_fn;
 	elevator_request_list_fn *elevator_latter_req_fn;
+	elevator_init_icq_fn *elevator_init_icq_fn;	/* see iocontext.h */
+	elevator_exit_icq_fn *elevator_exit_icq_fn;	/* ditto */
 	elevator_set_req_fn *elevator_set_req_fn;
 	elevator_put_req_fn *elevator_put_req_fn;
@@ -63,7 +70,6 @@ struct elevator_ops
 	elevator_init_fn *elevator_init_fn;
 	elevator_exit_fn *elevator_exit_fn;
-	void (*trim)(struct io_context *);
 };
 #define ELV_NAME_MAX	(16)
@@ -79,11 +85,20 @@ struct elv_fs_entry {
  */
 struct elevator_type
 {
-	struct list_head list;
+	/* managed by elevator core */
+	struct kmem_cache *icq_cache;
+	/* fields provided by elevator implementation */
 	struct elevator_ops ops;
+	size_t icq_size;	/* see iocontext.h */
+	size_t icq_align;	/* ditto */
 	struct elv_fs_entry *elevator_attrs;
 	char elevator_name[ELV_NAME_MAX];
 	struct module *elevator_owner;
+	/* managed by elevator core */
+	char icq_cache_name[ELV_NAME_MAX + 5];	/* elvname + "_io_cq" */
+	struct list_head list;
 };
 /*
@@ -91,10 +106,9 @@ struct elevator_type
  */
 struct elevator_queue
 {
-	struct elevator_ops *ops;
+	struct elevator_type *type;
 	void *elevator_data;
 	struct kobject kobj;
-	struct elevator_type *elevator_type;
 	struct mutex sysfs_lock;
 	struct hlist_head *hash;
 	unsigned int registered:1;
@@ -129,7 +143,7 @@ extern void elv_drain_elevator(struct request_queue *);
 /*
  * io scheduler registration
  */
-extern void elv_register(struct elevator_type *);
+extern int elv_register(struct elevator_type *);
 extern void elv_unregister(struct elevator_type *);
 /*
@@ -197,22 +211,5 @@ enum {
 		INIT_LIST_HEAD(&(rq)->csd.list);	\
 	} while (0)
-/*
- * io context count accounting
- */
-#define elv_ioc_count_mod(name, __val) this_cpu_add(name, __val)
-#define elv_ioc_count_inc(name) this_cpu_inc(name)
-#define elv_ioc_count_dec(name) this_cpu_dec(name)
-#define elv_ioc_count_read(name)				\
-({								\
-	unsigned long __val = 0;				\
-	int __cpu;						\
-	smp_wmb();						\
-	for_each_possible_cpu(__cpu)				\
-		__val += per_cpu(name, __cpu);			\
-	__val;							\
-})
 #endif /* CONFIG_BLOCK */
 #endif
@@ -319,6 +319,7 @@ struct inodes_stat_t {
 #define BLKPBSZGET _IO(0x12,123)
 #define BLKDISCARDZEROES _IO(0x12,124)
 #define BLKSECDISCARD _IO(0x12,125)
+#define BLKROTATIONAL _IO(0x12,126)
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */
...
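The BLKROTATIONAL ioctl defined above can be queried from userspace; a minimal sketch follows (the device path is only an example, and the value is returned as an unsigned short: 1 for rotational media, 0 for non-rotational).

/* Sketch: query the new BLKROTATIONAL ioctl from userspace. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	unsigned short rotational = 0;
	int fd = open("/dev/sda", O_RDONLY);	/* example device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKROTATIONAL, &rotational) == 0)
		printf("rotational: %hu\n", rotational);
	close(fd);
	return 0;
}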
@@ -3,32 +3,92 @@
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
+#include <linux/workqueue.h>
-struct cfq_queue;
-struct cfq_ttime {
-	unsigned long last_end_request;
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
+enum {
+	ICQ_IOPRIO_CHANGED,
+	ICQ_CGROUP_CHANGED,
 };
-struct cfq_io_context {
-	void *key;
-	struct cfq_queue *cfqq[2];
+/*
+ * An io_cq (icq) is association between an io_context (ioc) and a
+ * request_queue (q).  This is used by elevators which need to track
+ * information per ioc - q pair.
+ *
+ * Elevator can request use of icq by setting elevator_type->icq_size and
+ * ->icq_align.  Both size and align must be larger than that of struct
+ * io_cq and elevator can use the tail area for private information.  The
+ * recommended way to do this is defining a struct which contains io_cq as
+ * the first member followed by private members and using its size and
+ * align.  For example,
+ *
+ *	struct snail_io_cq {
+ *		struct io_cq	icq;
+ *		int		poke_snail;
+ *		int		feed_snail;
+ *	};
+ *
+ *	struct elevator_type snail_elv_type {
+ *		.ops =		{ ... },
+ *		.icq_size =	sizeof(struct snail_io_cq),
+ *		.icq_align =	__alignof__(struct snail_io_cq),
+ *		...
+ *	};
+ *
+ * If icq_size is set, block core will manage icq's.  All requests will
+ * have its ->elv.icq field set before elevator_ops->elevator_set_req_fn()
+ * is called and be holding a reference to the associated io_context.
+ *
+ * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
+ * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
+ * are called with both the associated io_context and queue locks held.
+ *
+ * Elevator is allowed to lookup icq using ioc_lookup_icq() while holding
+ * queue lock but the returned icq is valid only until the queue lock is
+ * released.  Elevators can not and should not try to create or destroy
+ * icq's.
+ *
+ * As icq's are linked from both ioc and q, the locking rules are a bit
+ * complex.
+ *
+ * - ioc lock nests inside q lock.
+ *
+ * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
+ *   q->icq_list and icq->q_node by q lock.
+ *
+ * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
+ *   itself is protected by q lock.  However, both the indexes and icq
+ *   itself are also RCU managed and lookup can be performed holding only
+ *   the q lock.
+ *
+ * - icq's are not reference counted.  They are destroyed when either the
+ *   ioc or q goes away.  Each request with icq set holds an extra
+ *   reference to ioc to ensure it stays until the request is completed.
+ *
+ * - Linking and unlinking icq's are performed while holding both ioc and q
+ *   locks.  Due to the lock ordering, q exit is simple but ioc exit
+ *   requires reverse-order double lock dance.
+ */
+struct io_cq {
+	struct request_queue	*q;
 	struct io_context	*ioc;
-	struct cfq_ttime ttime;
-	struct list_head queue_list;
-	struct hlist_node cic_list;
-	void (*dtor)(struct io_context *); /* destructor */
-	void (*exit)(struct io_context *); /* called on task exit */
-	struct rcu_head rcu_head;
+	/*
+	 * q_node and ioc_node link io_cq through icq_list of q and ioc
+	 * respectively.  Both fields are unused once ioc_exit_icq() is
+	 * called and shared with __rcu_icq_cache and __rcu_head which are
+	 * used for RCU free of io_cq.
+	 */
+	union {
+		struct list_head	q_node;
+		struct kmem_cache	*__rcu_icq_cache;
+	};
+	union {
+		struct hlist_node	ioc_node;
+		struct rcu_head		__rcu_head;
+	};
+	unsigned long		changed;
 };
 /*
@@ -43,11 +103,6 @@ struct io_context {
 	spinlock_t lock;
 	unsigned short ioprio;
-	unsigned short ioprio_changed;
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-	unsigned short cgroup_changed;
-#endif
 	/*
 	 * For request batching
@@ -55,9 +110,11 @@ struct io_context {
 	int nr_batch_requests;     /* Number of requests left in the batch */
 	unsigned long last_waited; /* Time last woken after wait for request */
-	struct radix_tree_root radix_root;
-	struct hlist_head cic_list;
-	void __rcu *ioc_data;
+	struct radix_tree_root	icq_tree;
+	struct io_cq __rcu	*icq_hint;
+	struct hlist_head	icq_list;
+	struct work_struct release_work;
 };
 static inline struct io_context *ioc_task_link(struct io_context *ioc)
@@ -76,20 +133,17 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 struct task_struct;
 #ifdef CONFIG_BLOCK
-int put_io_context(struct io_context *ioc);
+void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
 void exit_io_context(struct task_struct *task);
-struct io_context *get_io_context(gfp_t gfp_flags, int node);
-struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
+struct io_context *get_task_io_context(struct task_struct *task,
+				       gfp_t gfp_flags, int node);
+void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
+void ioc_cgroup_changed(struct io_context *ioc);
 #else
-static inline void exit_io_context(struct task_struct *task)
-{
-}
 struct io_context;
-static inline int put_io_context(struct io_context *ioc)
-{
-	return 1;
-}
+static inline void put_io_context(struct io_context *ioc,
+				  struct request_queue *locked_q) { }
+static inline void exit_io_context(struct task_struct *task) { }
 #endif
 #endif
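To illustrate the icq embedding scheme described in the header comment above, here is a hedged sketch of how an elevator could recover its private per-(ioc, q) data from a request once block core has set rq->elv.icq; snail_io_cq mirrors the example in the comment, and rq_to_sic() is purely illustrative, not part of this commit.

/*
 * Illustrative only: snail_io_cq embeds struct io_cq as its first member,
 * matching the layout the comment above recommends.  Block core sets
 * rq->elv.icq before elevator_set_req_fn() is called, so the elevator can
 * recover its private area with container_of().
 */
struct snail_io_cq {
	struct io_cq	icq;		/* must come first */
	int		poke_snail;
	int		feed_snail;
};

static inline struct snail_io_cq *rq_to_sic(struct request *rq)
{
	return container_of(rq->elv.icq, struct snail_io_cq, icq);
}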
@@ -873,6 +873,7 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 {
 #ifdef CONFIG_BLOCK
 	struct io_context *ioc = current->io_context;
+	struct io_context *new_ioc;
 	if (!ioc)
 		return 0;
@@ -884,11 +885,12 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 		if (unlikely(!tsk->io_context))
 			return -ENOMEM;
 	} else if (ioprio_valid(ioc->ioprio)) {
-		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
-		if (unlikely(!tsk->io_context))
+		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
+		if (unlikely(!new_ioc))
 			return -ENOMEM;
-		tsk->io_context->ioprio = ioc->ioprio;
+		new_ioc->ioprio = ioc->ioprio;
+		put_io_context(new_ioc, NULL);
 	}
 #endif
 	return 0;
...