Commit 6f0d7a9e authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "Four small fixes that should be merged for the current 3.18-rc series.
  This pull request contains:

   - a minor bugfix for the computation of the best IO priority given
     two merging requests.  From Jan Kara.

   - the final (final) fix for the segment counting issue that has been
     plaguing virtio-blk.  From Ming Lei.

   - enable parallel reinit notify for blk-mq queues, to combine the
     cost of an RCU grace period across lots of devices.  From Tejun
     Heo.

   - an error handling fix for the SCSI_IOCTL_SEND_COMMAND ioctl.  From
     Tony Battersby"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: blk-merge: fix blk_recount_segments()
  scsi: Fix more error handling in SCSI_IOCTL_SEND_COMMAND
  blk-mq: make mq_queue_reinit_notify() freeze queues in parallel
  block: Fix computation of merged request priority
parents 78646f62 7f60dcaa
@@ -97,19 +97,22 @@ void blk_recalc_rq_segments(struct request *rq)
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-        bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
-                        &q->queue_flags);
-        bool merge_not_need = bio->bi_vcnt < queue_max_segments(q);
+        unsigned short seg_cnt;
+
+        /* estimate segment number by bi_vcnt for non-cloned bio */
+        if (bio_flagged(bio, BIO_CLONED))
+                seg_cnt = bio_segments(bio);
+        else
+                seg_cnt = bio->bi_vcnt;
 
-        if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) &&
-                        merge_not_need)
-                bio->bi_phys_segments = bio->bi_vcnt;
+        if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+                        (seg_cnt < queue_max_segments(q)))
+                bio->bi_phys_segments = seg_cnt;
         else {
                 struct bio *nxt = bio->bi_next;
 
                 bio->bi_next = NULL;
-                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
-                                no_sg_merge && merge_not_need);
+                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                 bio->bi_next = nxt;
         }
...
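Context for the blk_recount_segments() hunk above: bi_vcnt only describes how many bvecs were filled into a bio's own vector. A cloned bio (BIO_CLONED) shares its parent's bvec array and walks it from its own iterator position, so its bi_vcnt may be stale or simply unset; the fix counts segments with bio_segments() instead. The following userspace model, with made-up names (struct mini_bio, mini_bio_segments) standing in for the kernel structures, shows how an iterator-based count and the raw array count diverge for a clone:

#include <stdio.h>

struct mini_bvec { unsigned int len; };

struct mini_bio {
        struct mini_bvec *bvec;   /* shared array, like bi_io_vec   */
        unsigned short vcnt;      /* like bi_vcnt: array fill level */
        unsigned short iter_idx;  /* like bi_iter.bi_idx            */
        unsigned int   iter_size; /* like bi_iter.bi_size, in bytes */
};

/* Count segments the iterator will actually walk, roughly what
 * bio_segments() does: advance from iter_idx until iter_size is used up. */
static unsigned short mini_bio_segments(const struct mini_bio *bio)
{
        unsigned int left = bio->iter_size;
        unsigned short i = bio->iter_idx, n = 0;

        while (left && i < bio->vcnt) {
                unsigned int step = bio->bvec[i].len < left ?
                                    bio->bvec[i].len : left;
                left -= step;
                i++;
                n++;
        }
        return n;
}

int main(void)
{
        struct mini_bvec vecs[4] = { {4096}, {4096}, {4096}, {4096} };
        struct mini_bio parent = { vecs, 4, 0, 16384 };
        /* a "clone" that starts past the first two vecs, e.g. after a split */
        struct mini_bio clone  = { vecs, 4, 2, 8192 };

        printf("parent: vcnt=%u segments=%u\n", (unsigned)parent.vcnt,
               (unsigned)mini_bio_segments(&parent));  /* 4 and 4 */
        printf("clone:  vcnt=%u segments=%u\n", (unsigned)clone.vcnt,
               (unsigned)mini_bio_segments(&clone));   /* 4 but only 2 */
        return 0;
}

Here the clone reports vcnt=4 yet will only ever touch two segments; trusting vcnt would set bi_phys_segments to something the request does not actually carry.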
@@ -107,11 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
         wake_up_all(&q->mq_freeze_wq);
 }
 
-/*
- * Guarantee no request is in use, so we can change any data structure of
- * the queue afterward.
- */
-void blk_mq_freeze_queue(struct request_queue *q)
+static void blk_mq_freeze_queue_start(struct request_queue *q)
 {
         bool freeze;
 
@@ -123,9 +119,23 @@ void blk_mq_freeze_queue(struct request_queue *q)
                 percpu_ref_kill(&q->mq_usage_counter);
                 blk_mq_run_queues(q, false);
         }
+}
+
+static void blk_mq_freeze_queue_wait(struct request_queue *q)
+{
         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+        blk_mq_freeze_queue_start(q);
+        blk_mq_freeze_queue_wait(q);
+}
+
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
         bool wake;
@@ -1921,7 +1931,7 @@ void blk_mq_free_queue(struct request_queue *q)
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
 {
-        blk_mq_freeze_queue(q);
+        WARN_ON_ONCE(!q->mq_freeze_depth);
 
         blk_mq_sysfs_unregister(q);
 
@@ -1936,8 +1946,6 @@ static void blk_mq_queue_reinit(struct request_queue *q)
         blk_mq_map_swqueue(q);
 
         blk_mq_sysfs_register(q);
-
-        blk_mq_unfreeze_queue(q);
 }
 
 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
@@ -1956,8 +1964,25 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
                 return NOTIFY_OK;
 
         mutex_lock(&all_q_mutex);
+
+        /*
+         * We need to freeze and reinit all existing queues.  Freezing
+         * involves synchronous wait for an RCU grace period and doing it
+         * one by one may take a long time.  Start freezing all queues in
+         * one swoop and then wait for the completions so that freezing can
+         * take place in parallel.
+         */
+        list_for_each_entry(q, &all_q_list, all_q_node)
+                blk_mq_freeze_queue_start(q);
+        list_for_each_entry(q, &all_q_list, all_q_node)
+                blk_mq_freeze_queue_wait(q);
+
         list_for_each_entry(q, &all_q_list, all_q_node)
                 blk_mq_queue_reinit(q);
+
+        list_for_each_entry(q, &all_q_list, all_q_node)
+                blk_mq_unfreeze_queue(q);
+
         mutex_unlock(&all_q_mutex);
         return NOTIFY_OK;
 }
...
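The restructuring above is a start/wait split: blk_mq_freeze_queue_start() kills the queue's percpu_ref, which only takes effect after an RCU grace period, and blk_mq_freeze_queue_wait() blocks until the usage counter drains to zero. By starting the kill on every queue before waiting on any of them, the notifier pays roughly one grace period in total instead of one per queue. A hypothetical userspace sketch of the same pattern, using one thread per queue to stand in for the deferred drain (compile with -lpthread; all names here are made up):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NQUEUES 4

/* One pending "freeze" per queue; the thread stands in for the
 * asynchronous percpu_ref kill plus its RCU grace period. */
struct queue {
        pthread_t drainer;
};

static void *drain(void *arg)
{
        (void)arg;
        sleep(1);               /* stands in for one RCU grace period */
        return NULL;
}

/* like blk_mq_freeze_queue_start(): kick off the async part, don't wait */
static void freeze_start(struct queue *q)
{
        pthread_create(&q->drainer, NULL, drain, NULL);
}

/* like blk_mq_freeze_queue_wait(): block until that queue is drained */
static void freeze_wait(struct queue *q)
{
        pthread_join(q->drainer, NULL);
}

int main(void)
{
        struct queue qs[NQUEUES];
        int i;

        /* Serial start+wait per queue would cost ~NQUEUES seconds here.
         * Starting all, then waiting on all, costs ~1 second total, which
         * is exactly the restructuring in blk_mq_queue_reinit_notify(). */
        for (i = 0; i < NQUEUES; i++)
                freeze_start(&qs[i]);
        for (i = 0; i < NQUEUES; i++)
                freeze_wait(&qs[i]);

        printf("all %d queues frozen\n", NQUEUES);
        return 0;
}

The design point is that only the wait has to be endured; the expensive asynchronous part is safe to overlap across queues, so reinit and unfreeze can then run over the whole list with every queue already frozen.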
@@ -157,14 +157,16 @@ static int get_task_ioprio(struct task_struct *p)
 
 int ioprio_best(unsigned short aprio, unsigned short bprio)
 {
-        unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
-        unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+        unsigned short aclass;
+        unsigned short bclass;
 
-        if (aclass == IOPRIO_CLASS_NONE)
-                aclass = IOPRIO_CLASS_BE;
-        if (bclass == IOPRIO_CLASS_NONE)
-                bclass = IOPRIO_CLASS_BE;
+        if (!ioprio_valid(aprio))
+                aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+        if (!ioprio_valid(bprio))
+                bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
 
+        aclass = IOPRIO_PRIO_CLASS(aprio);
+        bclass = IOPRIO_PRIO_CLASS(bprio);
         if (aclass == bclass)
                 return min(aprio, bprio);
         if (aclass > bclass)
...
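The bug fixed above: an I/O priority packs the class into the bits above IOPRIO_CLASS_SHIFT and the level into the low bits, and a request that never had a priority set carries the raw value 0 (class IOPRIO_CLASS_NONE). The old code mapped the class NONE to BE for the comparison but then returned min() over the unmodified raw values, so the unset value 0 always won and a merged request could end up with an invalid priority. The fix normalizes the value itself to the default (IOPRIO_CLASS_BE, IOPRIO_NORM) before extracting classes. A self-contained worked example; the macros and constants are restated locally to mirror include/linux/ioprio.h:

#include <stdio.h>

#define IOPRIO_CLASS_SHIFT      13
#define IOPRIO_PRIO_CLASS(p)    ((p) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_VALUE(c, d) (((c) << IOPRIO_CLASS_SHIFT) | (d))
#define ioprio_valid(p)         (IOPRIO_PRIO_CLASS(p) != IOPRIO_CLASS_NONE)

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };
#define IOPRIO_NORM     4

static unsigned short min_u16(unsigned short a, unsigned short b)
{
        return a < b ? a : b;
}

/* ioprio_best() as patched: normalize the *value* before comparing */
static unsigned short ioprio_best(unsigned short aprio, unsigned short bprio)
{
        unsigned short aclass, bclass;

        if (!ioprio_valid(aprio))
                aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
        if (!ioprio_valid(bprio))
                bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

        aclass = IOPRIO_PRIO_CLASS(aprio);
        bclass = IOPRIO_PRIO_CLASS(bprio);
        if (aclass == bclass)
                return min_u16(aprio, bprio);
        return aclass > bclass ? bprio : aprio;
}

int main(void)
{
        /* request A: no priority set => raw value 0 (class NONE) */
        unsigned short a = 0;
        /* request B: best-effort, level 7 (lower than the default 4) */
        unsigned short b = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 7);

        /* Old code: classes both treated as BE, then min(0, b) == 0, i.e.
         * the merged request inherited an invalid priority. Now A is first
         * normalized to (BE, 4), which correctly beats (BE, 7). */
        unsigned short best = ioprio_best(a, b);
        printf("class=%u level=%u\n", (unsigned)IOPRIO_PRIO_CLASS(best),
               (unsigned)(best & ((1 << IOPRIO_CLASS_SHIFT) - 1)));
        /* prints: class=2 level=4 */
        return 0;
}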
@@ -458,7 +458,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 
         rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
         if (IS_ERR(rq)) {
                 err = PTR_ERR(rq);
-                goto error;
+                goto error_free_buffer;
         }
         blk_rq_set_block_pc(rq);
 
@@ -531,9 +531,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
         }
 
 error:
+        blk_put_request(rq);
+
+error_free_buffer:
         kfree(buffer);
-        if (rq)
-                blk_put_request(rq);
+
         return err;
 }
 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
...
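The change above fixes error unwinding around blk_get_request(), which reports failure via ERR_PTR rather than NULL: after a failed allocation, rq is non-NULL (it encodes an errno in the pointer value), so the old shared exit path's "if (rq) blk_put_request(rq)" would put a garbage pointer. The fix gives each acquired resource its own label, jumps past the release of anything never acquired, and unwinds in reverse acquisition order. A hypothetical userspace sketch of the same idiom (ERR_PTR/IS_ERR re-modelled locally; get_buffer/get_request are stand-ins, not the ioctl's real helpers):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal ERR_PTR emulation: encode an errno in the pointer value, so a
 * failed "allocation" returns non-NULL, just like blk_get_request(). */
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-4095;
}

static void *get_buffer(void) { return calloc(1, 64); }   /* like the buffer */
static void *get_request(int fail)                        /* like blk_get_request() */
{
        return fail ? ERR_PTR(-ENODEV) : calloc(1, 64);
}

static int do_op(int fail_rq)
{
        void *buffer, *rq;
        int err = 0;

        buffer = get_buffer();
        if (!buffer)
                return -ENOMEM;

        rq = get_request(fail_rq);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                /* "if (rq) put(rq)" would be wrong here: rq != NULL even
                 * though nothing was acquired. Jump past its release. */
                goto error_free_buffer;
        }

        /* ... the actual work; success falls through the unwind below ... */

        free(rq);                       /* like blk_put_request(rq) */
error_free_buffer:
        free(buffer);                   /* like kfree(buffer) */
        return err;
}

int main(void)
{
        printf("success: %d, failed request: %d\n", do_op(0), do_op(1));
        return 0;
}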