Commit ecc3f712 authored by Andrew Morton's avatar Andrew Morton Committed by Jens Axboe

[PATCH] blk_congestion_wait tuning and lockup fix

blk_congestion_wait() will currently not wait if there are no write requests
in flight.  Which is a potential problem if all the dirty data is against NFS
filesystems.

For write(2) traffic against NFS, things work nicely, because writers
throttle in nfs_wait_on_requests().  But for MAP_SHARED dirtyings we need to
avoid spinning in balance_dirty_pages().  So allow callers to fall through to
the explicit sleep in that case.

This will also fix a weird lockup which the reiser4 developers report.  In
that case they have managed to have _all_ inodes against a superblock in
locked state, yet there are no write requests in flight.  Taking a nap in
blk_congestion_wait() in this case will yield the CPU to the threads which
are trying to write out pages.

Also tune up the sleep durations in various callers - 250 milliseconds seems
rather long.
parent c2e7eeb0
@@ -1604,13 +1604,9 @@ void blk_congestion_wait(int rw, long timeout)
 	DEFINE_WAIT(wait);
 	struct congestion_state *cs = &congestion_states[rw];
 
-	if (!atomic_read(&cs->nr_active_queues))
-		return;
-
 	blk_run_queues();
 	prepare_to_wait(&cs->wqh, &wait, TASK_UNINTERRUPTIBLE);
-	if (atomic_read(&cs->nr_active_queues))
-		io_schedule_timeout(timeout);
+	io_schedule_timeout(timeout);
 	finish_wait(&cs->wqh, &wait);
 }
......
@@ -301,7 +301,7 @@ static void wb_kupdate(unsigned long arg)
 		writeback_inodes(&wbc);
 		if (wbc.nr_to_write > 0) {
 			if (wbc.encountered_congestion)
-				blk_congestion_wait(WRITE, HZ);
+				blk_congestion_wait(WRITE, HZ/10);
 			else
 				break;	/* All the old data is written */
 		}
@@ -835,7 +835,7 @@ try_to_free_pages(struct zone *classzone,
 			wakeup_bdflush(total_scanned);
 
 			/* Take a nap, wait for some writeback to complete */
-			blk_congestion_wait(WRITE, HZ/4);
+			blk_congestion_wait(WRITE, HZ/10);
 			shrink_slab(total_scanned, gfp_mask);
 		}
 		if (gfp_mask & __GFP_FS)
@@ -904,7 +904,7 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps)
 		}
 		if (all_zones_ok)
 			break;
-		blk_congestion_wait(WRITE, HZ/4);
+		blk_congestion_wait(WRITE, HZ/10);
 	}
 	return nr_pages - to_free;
 }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment