Commit 33b40178 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Four smaller fixes for the current series.  This contains:

   - A fix for clones of discard bio's, that can cause data corruption.
     From Martin.

   - A fix for null_blk, where in certain queue modes it could access a
     request after it had been freed.  From Mike Krinkin.

   - An error handling leak fix for blkcg, from Tejun.

   - Also from Tejun, export of the functions that a file system needs
     to implement cgroup writeback support"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: Do a full clone when splitting discard bios
  block: export bio_associate_*() and wbc_account_io()
  blkcg: fix gendisk reference leak in blkg_conf_prep()
  null_blk: fix use-after-free problem
parents 9fbf075c f3f5da62
...@@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio); ...@@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio);
* Allocates and returns a new bio which represents @sectors from the start of * Allocates and returns a new bio which represents @sectors from the start of
* @bio, and updates @bio to represent the remaining sectors. * @bio, and updates @bio to represent the remaining sectors.
* *
* The newly allocated bio will point to @bio's bi_io_vec; it is the caller's * Unless this is a discard request the newly allocated bio will point
* responsibility to ensure that @bio is not freed before the split. * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
* @bio is not freed before the split.
*/ */
struct bio *bio_split(struct bio *bio, int sectors, struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs) gfp_t gfp, struct bio_set *bs)
...@@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors, ...@@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
BUG_ON(sectors <= 0); BUG_ON(sectors <= 0);
BUG_ON(sectors >= bio_sectors(bio)); BUG_ON(sectors >= bio_sectors(bio));
/*
* Discards need a mutable bio_vec to accommodate the payload
* required by the DSM TRIM and UNMAP commands.
*/
if (bio->bi_rw & REQ_DISCARD)
split = bio_clone_bioset(bio, gfp, bs);
else
split = bio_clone_fast(bio, gfp, bs); split = bio_clone_fast(bio, gfp, bs);
if (!split) if (!split)
return NULL; return NULL;
...@@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css) ...@@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
bio->bi_css = blkcg_css; bio->bi_css = blkcg_css;
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(bio_associate_blkcg);
/** /**
* bio_associate_current - associate a bio with %current * bio_associate_current - associate a bio with %current
...@@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio) ...@@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio)
bio->bi_css = task_get_css(current, blkio_cgrp_id); bio->bi_css = task_get_css(current, blkio_cgrp_id);
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(bio_associate_current);
/** /**
* bio_disassociate_task - undo bio_associate_current() * bio_disassociate_task - undo bio_associate_current()
......
...@@ -718,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, ...@@ -718,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
return -EINVAL; return -EINVAL;
disk = get_gendisk(MKDEV(major, minor), &part); disk = get_gendisk(MKDEV(major, minor), &part);
if (!disk || part) if (!disk)
return -EINVAL; return -EINVAL;
if (part) {
put_disk(disk);
return -EINVAL;
}
rcu_read_lock(); rcu_read_lock();
spin_lock_irq(disk->queue->queue_lock); spin_lock_irq(disk->queue->queue_lock);
......
...@@ -240,20 +240,20 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) ...@@ -240,20 +240,20 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
while ((entry = llist_del_all(&cq->list)) != NULL) { while ((entry = llist_del_all(&cq->list)) != NULL) {
entry = llist_reverse_order(entry); entry = llist_reverse_order(entry);
do { do {
struct request_queue *q = NULL;
cmd = container_of(entry, struct nullb_cmd, ll_list); cmd = container_of(entry, struct nullb_cmd, ll_list);
entry = entry->next; entry = entry->next;
if (cmd->rq)
q = cmd->rq->q;
end_cmd(cmd); end_cmd(cmd);
if (cmd->rq) { if (q && !q->mq_ops && blk_queue_stopped(q)) {
struct request_queue *q = cmd->rq->q;
if (!q->mq_ops && blk_queue_stopped(q)) {
spin_lock(q->queue_lock); spin_lock(q->queue_lock);
if (blk_queue_stopped(q)) if (blk_queue_stopped(q))
blk_start_queue(q); blk_start_queue(q);
spin_unlock(q->queue_lock); spin_unlock(q->queue_lock);
} }
}
} while (entry); } while (entry);
} }
......
...@@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page, ...@@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
else else
wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
} }
EXPORT_SYMBOL_GPL(wbc_account_io);
/** /**
* inode_congested - test whether an inode is congested * inode_congested - test whether an inode is congested
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment