Commit b8dcdab3 authored by Linus Torvalds

Merge tag 'for-linus-20180825' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few small fixes for this merge window:

   - Locking imbalance fix for bcache (Shan Hai)

   - A few small fixes for wbt. One is a cleanup/prep, one is a fix for
     an existing issue, and the last two are fixes for changes that went
     into this merge window (me)"

* tag 'for-linus-20180825' of git://git.kernel.dk/linux-block:
  blk-wbt: don't maintain inflight counts if disabled
  blk-wbt: fix has-sleeper queueing check
  blk-wbt: use wq_has_sleeper() for wq active check
  blk-wbt: move disable check into get_limit()
  bcache: release dc->writeback_lock properly in bch_writeback_thread()
parents db84abf5 c125311d
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -453,9 +453,26 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
 	else if (val >= 0)
 		val *= 1000ULL;
 
-	wbt_set_min_lat(q, val);
+	/*
+	 * Ensure that the queue is idled, in case the latency update
+	 * ends up either enabling or disabling wbt completely. We can't
+	 * have IO inflight if that happens.
+	 */
+	if (q->mq_ops) {
+		blk_mq_freeze_queue(q);
+		blk_mq_quiesce_queue(q);
+	} else
+		blk_queue_bypass_start(q);
+
+	wbt_set_min_lat(q, val);
 	wbt_update_limits(q);
+
+	if (q->mq_ops) {
+		blk_mq_unquiesce_queue(q);
+		blk_mq_unfreeze_queue(q);
+	} else
+		blk_queue_bypass_end(q);
+
 	return count;
 }
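
Why the idling: changing the latency target can flip wbt between fully enabled and fully disabled, and those transitions assume no IO is in flight. A minimal sketch of the idle-then-update pattern used above, assuming a kernel of this era; update_limits() is a hypothetical placeholder for whatever must run while the queue is idle:

	static void do_update_idled(struct request_queue *q,
				    void (*update_limits)(struct request_queue *))
	{
		if (q->mq_ops) {
			blk_mq_freeze_queue(q);		/* drain in-flight IO, gate new IO */
			blk_mq_quiesce_queue(q);	/* wait out running dispatches */
		} else
			blk_queue_bypass_start(q);	/* legacy request_fn equivalent */

		update_limits(q);			/* hypothetical: runs with no IO in flight */

		if (q->mq_ops) {
			blk_mq_unquiesce_queue(q);
			blk_mq_unfreeze_queue(q);
		} else
			blk_queue_bypass_end(q);
	}
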
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -118,7 +118,7 @@ static void rwb_wake_all(struct rq_wb *rwb)
 	for (i = 0; i < WBT_NUM_RWQ; i++) {
 		struct rq_wait *rqw = &rwb->rq_wait[i];
 
-		if (waitqueue_active(&rqw->wait))
+		if (wq_has_sleeper(&rqw->wait))
 			wake_up_all(&rqw->wait);
 	}
 }
...@@ -162,7 +162,7 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) ...@@ -162,7 +162,7 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
if (inflight && inflight >= limit) if (inflight && inflight >= limit)
return; return;
if (waitqueue_active(&rqw->wait)) { if (wq_has_sleeper(&rqw->wait)) {
int diff = limit - inflight; int diff = limit - inflight;
if (!inflight || diff >= rwb->wb_background / 2) if (!inflight || diff >= rwb->wb_background / 2)
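
The waitqueue_active() to wq_has_sleeper() switch in the two hunks above is a memory-ordering fix, not a rename. waitqueue_active() is a plain, barrier-free check of the wait list, so the waker can observe an empty queue while another task is still between queueing itself and testing the wakeup condition, losing the wakeup. wq_has_sleeper() issues a full barrier first; roughly, per include/linux/wait.h of this vintage:

	static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
	{
		/*
		 * Pairs with the barrier implied by set_current_state() on
		 * the waiter side: either the waker sees the queued waiter,
		 * or the waiter sees the updated condition and skips sleeping.
		 */
		smp_mb();
		return waitqueue_active(wq_head);
	}
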
@@ -449,6 +449,13 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 {
 	unsigned int limit;
 
+	/*
+	 * If we got disabled, just return UINT_MAX. This ensures that
+	 * we'll properly inc a new IO, and dec+wakeup at the end.
+	 */
+	if (!rwb_enabled(rwb))
+		return UINT_MAX;
+
 	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
 		return rwb->wb_background;
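
get_limit() feeds rq_wait_inc_below() in the wait path below, so returning UINT_MAX instead of short-circuiting keeps the inflight accounting balanced: the increment always succeeds against a UINT_MAX limit, an IO submitted while wbt is disabled is still counted, and its completion-side dec-and-wakeup has something to decrement even if wbt is toggled mid-flight. For reference, the underlying primitive is essentially this lock-free bounded increment (abridged from block/blk-rq-qos.c of this vintage):

	static bool atomic_inc_below(atomic_t *v, unsigned int below)
	{
		unsigned int cur = atomic_read(v);

		for (;;) {
			unsigned int old;

			if (cur >= below)
				return false;	/* at the limit: caller must wait */
			old = atomic_cmpxchg(v, cur, cur + 1);
			if (old == cur)
				break;		/* won the race: slot claimed */
			cur = old;		/* lost the race: retry */
		}

		return true;
	}
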
@@ -485,31 +492,17 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 {
 	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
 	DECLARE_WAITQUEUE(wait, current);
+	bool has_sleeper;
 
-	/*
-	 * inc it here even if disabled, since we'll dec it at completion.
-	 * this only happens if the task was sleeping in __wbt_wait(),
-	 * and someone turned it off at the same time.
-	 */
-	if (!rwb_enabled(rwb)) {
-		atomic_inc(&rqw->inflight);
-		return;
-	}
-
-	if (!waitqueue_active(&rqw->wait)
-	    && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+	has_sleeper = wq_has_sleeper(&rqw->wait);
+	if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 		return;
 
 	add_wait_queue_exclusive(&rqw->wait, &wait);
 	do {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 
-		if (!rwb_enabled(rwb)) {
-			atomic_inc(&rqw->inflight);
-			break;
-		}
-
-		if (rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+		if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 			break;
 
 		if (lock) {
@@ -518,6 +511,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 			spin_lock_irq(lock);
 		} else
 			io_schedule();
+		has_sleeper = false;
 	} while (1);
 
 	__set_current_state(TASK_RUNNING);
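
Together these two hunks close a queue-jumping hole: a newly arriving task could claim an inflight slot even though earlier tasks were already asleep on the waitqueue. A task that sees sleepers now parks itself unconditionally and only competes for a slot after it has slept once, since being woken from an exclusive wait means it has reached the head of the line. The resulting wait loop, abridged and annotated (the spinlock juggling around io_schedule() is elided):

	has_sleeper = wq_has_sleeper(&rqw->wait);	/* anyone already queued? */
	if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
		return;					/* fast path: no queue, got a slot */

	add_wait_queue_exclusive(&rqw->wait, &wait);	/* join the back of the line */
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
			break;				/* our turn, and a slot was free */
		io_schedule();
		has_sleeper = false;			/* exclusive wakeup: now head of line */
	} while (1);
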
@@ -546,6 +540,9 @@ static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
 {
 	enum wbt_flags flags = 0;
 
+	if (!rwb_enabled(rwb))
+		return 0;
+
 	if (bio_op(bio) == REQ_OP_READ) {
 		flags = WBT_READ;
 	} else if (wbt_should_throttle(rwb, bio)) {
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -685,8 +685,10 @@ static int bch_writeback_thread(void *arg)
 			 * data on cache. BCACHE_DEV_DETACHING flag is set in
 			 * bch_cached_dev_detach().
 			 */
-			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
+				up_write(&dc->writeback_lock);
 				break;
+			}
 		}
 
 		up_write(&dc->writeback_lock);
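
This is the "locking imbalance fix for bcache" from the summary above: bch_writeback_thread() takes dc->writeback_lock at the top of each loop iteration and drops it at the bottom, but the BCACHE_DEV_DETACHING break exited the loop with the rwsem still write-held, so any later acquisition of writeback_lock (the detach path included) would block forever. The loop shape, abridged:

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		/* ... look for dirty data ... */
		if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
			up_write(&dc->writeback_lock);	/* the fix: release before leaving */
			break;
		}
		up_write(&dc->writeback_lock);		/* normal per-iteration release */
		/* ... perform writeback outside the lock ... */
	}
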