Commit 418f398e authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] Remove most of the blk_run_queues() calls

We don't need these with self-unplugging queues.

The patch also contains a couple of microopts suggested by Andrea: we
don't need to run sync_page() if the page just came unlocked.
parent 00c8e791
@@ -127,9 +127,10 @@ void __wait_on_buffer(struct buffer_head * bh)
 	get_bh(bh);
 	do {
 		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
-		blk_run_queues();
-		if (buffer_locked(bh))
-			io_schedule();
+		if (buffer_locked(bh)) {
+			blk_run_queues();
+			io_schedule();
+		}
 	} while (buffer_locked(bh));
 	put_bh(bh);
 	finish_wait(wqh, &wait);
@@ -959,8 +960,6 @@ create_buffers(struct page * page, unsigned long size, int retry)
		 * the reserve list is empty, we're sure there are
		 * async buffer heads in use.
		 */
-		blk_run_queues();
-
		free_more_memory();
		goto try_again;
	}
......
@@ -334,7 +334,6 @@ writeback_inodes(struct writeback_control *wbc)
 	}
 	spin_unlock(&sb_lock);
 	spin_unlock(&inode_lock);
-	blk_run_queues();
 }
/* /*
......
@@ -618,7 +618,6 @@ mpage_writepages(struct address_space *mapping,
	int (*writepage)(struct page *page, struct writeback_control *wbc);

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
-		blk_run_queues();
		wbc->encountered_congestion = 1;
		return 0;
	}
@@ -673,7 +672,6 @@ mpage_writepages(struct address_space *mapping,
			if (ret || (--(wbc->nr_to_write) <= 0))
				done = 1;
			if (wbc->nonblocking && bdi_write_congested(bdi)) {
-				blk_run_queues();
				wbc->encountered_congestion = 1;
				done = 1;
			}
......
@@ -259,9 +259,10 @@ void wait_on_page_bit(struct page *page, int bit_nr)
 	do {
 		prepare_to_wait(waitqueue, &wait, TASK_UNINTERRUPTIBLE);
-		sync_page(page);
-		if (test_bit(bit_nr, &page->flags))
-			io_schedule();
+		if (test_bit(bit_nr, &page->flags)) {
+			sync_page(page);
+			io_schedule();
+		}
 	} while (test_bit(bit_nr, &page->flags));
 	finish_wait(waitqueue, &wait);
 }
@@ -326,10 +327,11 @@ void __lock_page(struct page *page)
 	while (TestSetPageLocked(page)) {
 		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
-		sync_page(page);
-		if (PageLocked(page))
-			io_schedule();
+		if (PageLocked(page)) {
+			sync_page(page);
+			io_schedule();
+		}
 	}
 	finish_wait(wqh, &wait);
 }
 EXPORT_SYMBOL(__lock_page);
......
@@ -237,7 +237,6 @@ static void background_writeout(unsigned long _min_pages)
			break;
		}
	}
-	blk_run_queues();
}
/* /*
@@ -308,7 +307,6 @@ static void wb_kupdate(unsigned long arg)
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
-	blk_run_queues();
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	mod_timer(&wb_timer, next_jif);
......
@@ -236,10 +236,8 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
-	if (ret) {
+	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
-		blk_run_queues();
-	}
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
......
@@ -957,7 +957,6 @@ int kswapd(void *p)
		finish_wait(&pgdat->kswapd_wait, &wait);
		get_page_state(&ps);
		balance_pgdat(pgdat, 0, &ps);
-		blk_run_queues();
	}
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment