Commit e61b17ab authored by Linus Torvalds

Import 2.3.9pre2

parent b34c35ce
......@@ -234,9 +234,9 @@ L: linux-kernel@vger.rutgers.edu
S: Maintained
DIGI INTL. EPCA DRIVER
P: Daniel Taylor
P: Chad Schwartz
M: support@dgii.com
M: danielt@dgii.com
M: chads@dgii.com
L: digilnux@dgii.com
S: Maintained
......
......@@ -366,7 +366,7 @@ __initfunc(static void ide_setup_pci_device (struct pci_dev *dev, ide_pci_device
byte tmp = 0;
ide_hwif_t *hwif, *mate = NULL;
#ifdef CONFIG_IDEDMA_AUTO
#ifdef CONFIG_IDEDMA_PCI_AUTO
autodma = 1;
#endif
check_if_enabled:
......
......@@ -17,6 +17,7 @@
#include <linux/locks.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
#include <asm/io.h>
......
......@@ -688,6 +688,90 @@ static void end_buffer_io_sync(struct buffer_head *bh, int uptodate)
unlock_buffer(bh);
}
static void end_buffer_io_bad(struct buffer_head *bh, int uptodate)
{
mark_buffer_uptodate(bh, uptodate);
unlock_buffer(bh);
BUG();
}
static void end_buffer_io_async(struct buffer_head * bh, int uptodate)
{
static spinlock_t page_uptodate_lock = SPIN_LOCK_UNLOCKED;
unsigned long flags;
struct buffer_head *tmp;
struct page *page;
int free;
mark_buffer_uptodate(bh, uptodate);
/* This is a temporary buffer used for page I/O. */
page = mem_map + MAP_NR(bh->b_data);
if (!uptodate)
SetPageError(page);
/*
* Be _very_ careful from here on. Bad things can happen if
* two buffer heads end IO at almost the same time and both
* decide that the page is now completely done.
*
* Async buffer_heads are here only as labels for IO, and get
* thrown away once the IO for this page is complete. IO is
* deemed complete once all buffers have been visited
* (b_count==0) and are now unlocked. We must make sure that
* only the _last_ buffer that decrements its count is the one
* that frees the page..
*/
spin_lock_irqsave(&page_uptodate_lock, flags);
unlock_buffer(bh);
tmp = bh->b_this_page;
while (tmp != bh) {
if (buffer_locked(tmp))
goto still_busy;
tmp = tmp->b_this_page;
}
/* OK, the async IO on this page is complete. */
spin_unlock_irqrestore(&page_uptodate_lock, flags);
/*
* if none of the buffers had errors then we can set the
* page uptodate:
*/
if (!PageError(page))
SetPageUptodate(page);
/*
* Run the hooks that have to be done when a page I/O has completed.
*
* Note - we need to test the flags before we unlock the page, but
* we must not actually free the page until after the unlock!
*/
if (test_and_clear_bit(PG_decr_after, &page->flags))
atomic_dec(&nr_async_pages);
if (test_and_clear_bit(PG_free_swap_after, &page->flags))
swap_free(page->offset);
free = test_and_clear_bit(PG_free_after, &page->flags);
if (page->owner != -1)
PAGE_BUG(page);
page->owner = (int)current;
UnlockPage(page);
if (free)
__free_page(page);
return;
still_busy:
spin_unlock_irqrestore(&page_uptodate_lock, flags);
return;
}
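The completion logic above walks the circular b_this_page ring under a private spinlock: each completing buffer unlocks itself, then scans the ring, and only the caller that finds every buffer unlocked goes on to finish the page. Below is a minimal single-threaded userspace model of just the ring walk; the spinlock, which is what makes the real version safe against concurrent completions, is elided, and all names here are illustrative, not kernel API.

#include <stdio.h>

struct ring_buf {
	int locked;                  /* models buffer_locked(bh) */
	struct ring_buf *this_page;  /* models bh->b_this_page (circular) */
};

/* Unlock 'bh', then report whether every buffer on the page is now
 * unlocked -- i.e. whether this caller is the last one and must
 * finish the page, as end_buffer_io_async() does above. */
static int unlock_and_check(struct ring_buf *bh)
{
	struct ring_buf *tmp;

	bh->locked = 0;              /* unlock_buffer(bh) */
	for (tmp = bh->this_page; tmp != bh; tmp = tmp->this_page)
		if (tmp->locked)
			return 0;    /* still busy: someone else finishes */
	return 1;
}

int main(void)
{
	struct ring_buf a, b, c;

	a.this_page = &b; b.this_page = &c; c.this_page = &a;
	a.locked = b.locked = c.locked = 1;

	int r1 = unlock_and_check(&a);  /* 0: b and c still locked */
	int r2 = unlock_and_check(&b);  /* 0: c still locked */
	int r3 = unlock_and_check(&c);  /* 1: last completion finishes page */
	printf("%d %d %d\n", r1, r2, r3);
	return 0;
}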
/*
* Ok, this is getblk, and it isn't very clear, again to hinder
* race-conditions. Most of the code is seldom used, (ie repeating),
......@@ -954,6 +1038,7 @@ static void put_unused_buffer_head(struct buffer_head * bh)
init_waitqueue_head(&bh->b_wait);
nr_unused_buffer_heads++;
bh->b_next_free = unused_list;
bh->b_this_page = NULL;
unused_list = bh;
}
......@@ -1052,8 +1137,7 @@ static struct buffer_head * get_unused_buffer_head(int async)
* from ordinary buffer allocations, and only async requests are allowed
* to sleep waiting for buffer heads.
*/
static struct buffer_head * create_buffers(unsigned long page,
unsigned long size, int async)
static struct buffer_head * create_buffers(unsigned long page, unsigned long size, int async)
{
DECLARE_WAITQUEUE(wait, current);
struct buffer_head *bh, *head;
......@@ -1077,7 +1161,9 @@ static struct buffer_head * create_buffers(unsigned long page,
bh->b_size = size;
bh->b_data = (char *) (page+offset);
bh->b_list = 0;
bh->b_list = BUF_CLEAN;
bh->b_flushtime = 0;
bh->b_end_io = end_buffer_io_bad;
}
return head;
/*
......@@ -1125,108 +1211,7 @@ static struct buffer_head * create_buffers(unsigned long page,
goto try_again;
}
/* Run the hooks that have to be done when a page I/O has completed. */
static inline void after_unlock_page (struct page * page)
{
if (test_and_clear_bit(PG_decr_after, &page->flags)) {
atomic_dec(&nr_async_pages);
#ifdef DEBUG_SWAP
printk ("DebugVM: Finished IO on page %p, nr_async_pages %d\n",
(char *) page_address(page),
atomic_read(&nr_async_pages));
#endif
}
if (test_and_clear_bit(PG_swap_unlock_after, &page->flags))
swap_after_unlock_page(page->offset);
if (test_and_clear_bit(PG_free_after, &page->flags))
__free_page(page);
}
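Both the new end_buffer_io_async() above and this older after_unlock_page() rely on test_and_clear_bit() so that each per-page hook fires exactly once, even if two completions were to race. A small C11 model of that idiom follows; test_and_clear_bit_model is an illustrative name, not the kernel primitive.

#include <stdatomic.h>
#include <stdio.h>

#define PG_decr_after 5   /* same bit number as in the kernel headers */

/* Atomically clear bit 'nr' and return whether it was previously set,
 * mirroring the kernel's test_and_clear_bit() semantics. */
static int test_and_clear_bit_model(int nr, atomic_ulong *addr)
{
	unsigned long mask = 1UL << nr;
	return (atomic_fetch_and(addr, ~mask) & mask) != 0;
}

int main(void)
{
	atomic_ulong flags = 1UL << PG_decr_after;

	/* The first caller sees the bit set and runs the hook; any later
	 * caller sees 0 and skips it. */
	printf("%d\n", test_and_clear_bit_model(PG_decr_after, &flags)); /* 1 */
	printf("%d\n", test_and_clear_bit_model(PG_decr_after, &flags)); /* 0 */
	return 0;
}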
/*
* Free all temporary buffers belonging to a page.
* This needs to be called with interrupts disabled.
*/
static inline void free_async_buffers (struct buffer_head * bh)
{
struct buffer_head *tmp, *tail;
/*
* Link all the buffers into the b_next_free list,
* so we only have to do one xchg() operation ...
*/
tail = bh;
while ((tmp = tail->b_this_page) != bh) {
tail->b_next_free = tmp;
tail = tmp;
};
/* Update the reuse list */
tail->b_next_free = xchg(&reuse_list, NULL);
reuse_list = bh;
/* Wake up any waiters ... */
wake_up(&buffer_wait);
}
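free_async_buffers() pre-links the whole chain so that splicing it onto the shared reuse_list takes only one xchg(). The C11 sketch below shows a chain push built around a single atomic exchange; it is a slight variant (the kernel code exchanges the list head out with NULL and then republishes it, while this is the single-swap form), and struct node and push_chain are illustrative names.

#include <stdatomic.h>
#include <stdio.h>

struct node {
	struct node *next;
	int id;
};

static _Atomic(struct node *) reuse_list;

/* Push the pre-linked chain head..tail onto the shared list with one
 * atomic exchange: whatever was on the list becomes the tail's
 * continuation. */
static void push_chain(struct node *head, struct node *tail)
{
	tail->next = atomic_exchange(&reuse_list, head);
}

int main(void)
{
	struct node n1 = { 0 }, n2 = { 0 };

	n1.id = 1; n2.id = 2;
	n1.next = &n2;                 /* pre-linked chain: n1 -> n2 */
	push_chain(&n1, &n2);

	for (struct node *p = atomic_load(&reuse_list); p; p = p->next)
		printf("node %d\n", p->id);
	return 0;
}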
static void end_buffer_io_async(struct buffer_head * bh, int uptodate)
{
unsigned long flags;
struct buffer_head *tmp;
struct page *page;
mark_buffer_uptodate(bh, uptodate);
/* This is a temporary buffer used for page I/O. */
page = mem_map + MAP_NR(bh->b_data);
if (!uptodate)
SetPageError(page);
/*
* Be _very_ careful from here on. Bad things can happen if
* two buffer heads end IO at almost the same time and both
* decide that the page is now completely done.
*
* Async buffer_heads are here only as labels for IO, and get
* thrown away once the IO for this page is complete. IO is
* deemed complete once all buffers have been visited
* (b_count==0) and are now unlocked. We must make sure that
* only the _last_ buffer that decrements its count is the one
* that frees the page..
*/
save_flags(flags);
cli();
unlock_buffer(bh);
tmp = bh->b_this_page;
while (tmp != bh) {
if (buffer_locked(tmp))
goto still_busy;
tmp = tmp->b_this_page;
}
/* OK, the async IO on this page is complete. */
restore_flags(flags);
after_unlock_page(page);
/*
* if none of the buffers had errors then we can set the
* page uptodate:
*/
if (!PageError(page))
SetPageUptodate(page);
if (page->owner != -1)
PAGE_BUG(page);
page->owner = (int)current;
UnlockPage(page);
return;
still_busy:
restore_flags(flags);
return;
}
static int create_page_buffers (int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
static int create_page_buffers(int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
{
struct buffer_head *head, *bh, *tail;
int block;
......@@ -1305,6 +1290,7 @@ int block_flushpage(struct inode *inode, struct page *page, unsigned long offset
if (bh->b_dev == B_FREE)
BUG();
mark_buffer_clean(bh);
clear_bit(BH_Uptodate, &bh->b_state);
bh->b_blocknr = 0;
bh->b_count--;
}
......@@ -1318,16 +1304,21 @@ int block_flushpage(struct inode *inode, struct page *page, unsigned long offset
* the 'final' flushpage. We have invalidated the bmap
* cached value unconditionally, so real IO is not
* possible anymore.
*
* If the free doesn't work out, the buffers can be
* left around - they just turn into anonymous buffers
* instead.
*/
if (!offset)
try_to_free_buffers(page);
if (!offset) {
if (!try_to_free_buffers(page))
buffermem += PAGE_CACHE_SIZE;
}
unlock_kernel();
return 0;
}
static void create_empty_buffers (struct page *page,
struct inode *inode, unsigned long blocksize)
static void create_empty_buffers(struct page *page, struct inode *inode, unsigned long blocksize)
{
struct buffer_head *bh, *head, *tail;
......@@ -1341,6 +1332,7 @@ static void create_empty_buffers (struct page *page,
do {
bh->b_dev = inode->i_dev;
bh->b_blocknr = 0;
bh->b_end_io = end_buffer_io_bad;
tail = bh;
bh = bh->b_this_page;
} while (bh);
......@@ -1357,8 +1349,8 @@ int block_write_full_page (struct file *file, struct page *page, fs_getblock_t f
{
struct dentry *dentry = file->f_dentry;
struct inode *inode = dentry->d_inode;
int err, created, i;
unsigned long block, phys, offset;
int err, i;
unsigned long block, offset;
struct buffer_head *bh, *head;
if (!PageLocked(page))
......@@ -1381,22 +1373,21 @@ int block_write_full_page (struct file *file, struct page *page, fs_getblock_t f
if (!bh)
BUG();
if (!bh->b_blocknr) {
err = -EIO;
phys = fs_get_block (inode, block, 1, &err, &created);
if (!phys)
/*
* If the buffer isn't up-to-date, we can't be sure
* that the buffer has been initialized with the proper
* block number information etc..
*
* Leave it to the low-level FS to make all those
* decisions (block #0 may actually be a valid block)
*/
bh->b_end_io = end_buffer_io_sync;
if (!buffer_uptodate(bh)) {
err = fs_get_block(inode, block, bh, 0);
if (err)
goto out;
init_buffer(bh, inode->i_dev, phys, end_buffer_io_sync, NULL);
bh->b_state = (1<<BH_Uptodate);
} else {
/*
* block already exists, just mark it uptodate and
* dirty:
*/
bh->b_end_io = end_buffer_io_sync;
set_bit(BH_Uptodate, &bh->b_state);
}
set_bit(BH_Uptodate, &bh->b_state);
atomic_mark_buffer_dirty(bh,0);
bh = bh->b_this_page;
......@@ -1415,10 +1406,10 @@ int block_write_partial_page (struct file *file, struct page *page, unsigned lon
struct dentry *dentry = file->f_dentry;
struct inode *inode = dentry->d_inode;
unsigned long block;
int err, created, partial;
int err, partial;
unsigned long blocksize, start_block, end_block;
unsigned long start_offset, start_bytes, end_bytes;
unsigned long bbits, phys, blocks, i, len;
unsigned long bbits, blocks, i, len;
struct buffer_head *bh, *head;
char * target_buf;
......@@ -1469,43 +1460,23 @@ int block_write_partial_page (struct file *file, struct page *page, unsigned lon
partial = 1;
goto skip;
}
if (!bh->b_blocknr) {
err = -EIO;
phys = fs_get_block (inode, block, 1, &err, &created);
if (!phys)
goto out;
init_buffer(bh, inode->i_dev, phys, end_buffer_io_sync, NULL);
/*
* if partially written block which has contents on
* disk, then we have to read it first.
* We also rely on the fact that filesystem holes
* cannot be written.
*/
if (start_offset || (end_bytes && (i == end_block))) {
if (created) {
memset(bh->b_data, 0, bh->b_size);
} else {
bh->b_state = 0;
ll_rw_block(READ, 1, &bh);
lock_kernel();
wait_on_buffer(bh);
unlock_kernel();
err = -EIO;
if (!buffer_uptodate(bh))
goto out;
}
}
/*
* If the buffer is not up-to-date, we need to ask the low-level
* FS to do something for us (we used to have assumptions about
* the meaning of b_blocknr etc, that's bad).
*
* If "update" is set, that means that the low-level FS should
* try to make sure that the block is up-to-date because we're
* not going to fill it completely.
*/
bh->b_end_io = end_buffer_io_sync;
if (!buffer_uptodate(bh)) {
int update = start_offset || (end_bytes && (i == end_block));
bh->b_state = (1<<BH_Uptodate);
} else {
/*
* block already exists, just mark it uptodate:
*/
bh->b_end_io = end_buffer_io_sync;
set_bit(BH_Uptodate, &bh->b_state);
created = 0;
err = fs_get_block(inode, block, bh, update);
if (err)
goto out;
}
err = -EFAULT;
......@@ -1538,6 +1509,7 @@ int block_write_partial_page (struct file *file, struct page *page, unsigned lon
* should not penalize them for somebody else writing
* lots of dirty pages.
*/
set_bit(BH_Uptodate, &bh->b_state);
if (!test_and_set_bit(BH_Dirty, &bh->b_state)) {
lock_kernel();
__mark_dirty(bh, 0);
......@@ -1627,7 +1599,7 @@ int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
BUG();
}
set_bit(BH_Uptodate, &bh->b_state);
atomic_mark_buffer_dirty(bh, 0);
set_bit(BH_Dirty, &bh->b_state);
arr[nr++] = bh;
}
bh = bh->b_this_page;
......@@ -1714,8 +1686,7 @@ int block_read_full_page(struct file * file, struct page * page)
* this is safe to do because we hold the page lock:
*/
if (phys_block) {
init_buffer(bh, inode->i_dev, phys_block,
end_buffer_io_async, NULL);
init_buffer(bh, inode->i_dev, phys_block, end_buffer_io_async, NULL);
arr[nr] = bh;
nr++;
} else {
......@@ -1802,7 +1773,7 @@ static int grow_buffers(int size)
* Can the buffer be thrown out?
*/
#define BUFFER_BUSY_BITS ((1<<BH_Dirty) | (1<<BH_Lock) | (1<<BH_Protected))
#define buffer_busy(bh) ((bh)->b_count || ((bh)->b_state & BUFFER_BUSY_BITS))
#define buffer_busy(bh) ((bh)->b_count | ((bh)->b_state & BUFFER_BUSY_BITS))
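The new buffer_busy() swaps the logical '||' for a bitwise '|': both operands only matter as zero/non-zero here, (a | b) != 0 exactly when a || b (given that b_count is non-negative in practice), and the bitwise form avoids a conditional branch. A quick demonstration of the equivalence:

#include <stdio.h>

int main(void)
{
	unsigned long count, state;

	count = 0; state = 1UL << 1;   /* e.g. only the dirty bit set */
	printf("%d %d\n", (count | state) != 0, count || state);  /* 1 1 */

	count = 0; state = 0;          /* idle buffer */
	printf("%d %d\n", (count | state) != 0, count || state);  /* 0 0 */
	return 0;
}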
/*
* try_to_free_buffers() checks if all the buffers on this particular page
......@@ -1820,12 +1791,8 @@ int try_to_free_buffers(struct page * page)
struct buffer_head * p = tmp;
tmp = tmp->b_this_page;
if (!buffer_busy(p))
continue;
too_many_dirty_buffers = 1;
wakeup_bdflush(0);
return 0;
if (buffer_busy(p))
goto busy_buffer_page;
} while (tmp != bh);
tmp = bh;
......@@ -1847,10 +1814,13 @@ int try_to_free_buffers(struct page * page)
/* And free the page */
page->buffers = NULL;
if (__free_page(page)) {
buffermem -= PAGE_SIZE;
return 1;
}
__free_page(page);
return 1;
busy_buffer_page:
/* Uhhuh, start writeback so that we don't end up with all dirty pages */
too_many_dirty_buffers = 1;
wakeup_bdflush(0);
return 0;
}
......@@ -2208,7 +2178,10 @@ int bdflush(void * unused)
*/
if (!too_many_dirty_buffers || nr_buffers_type[BUF_DIRTY] < bdf_prm.b_un.ndirty) {
too_many_dirty_buffers = 0;
sleep_on_timeout(&bdflush_wait, 5*HZ);
spin_lock_irq(&current->sigmask_lock);
flush_signals(current);
spin_unlock_irq(&current->sigmask_lock);
interruptible_sleep_on_timeout(&bdflush_wait, 5*HZ);
}
}
}
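bdflush now drains any stale signals before sleeping interruptibly, so a leftover signal cannot turn the 5-second nap into a busy spin, while a fresh signal can still wake the daemon early. A userspace model of the same pattern using sigtimedwait(), with SIGUSR1 standing in for wakeup_bdflush() (all names illustrative):

#define _POSIX_C_SOURCE 199309L
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	struct timespec poll_now = { 0, 0 };
	struct timespec timeout = { 5, 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);   /* required for sigtimedwait() */

	/* Wake it early with: kill -USR1 <pid> from another shell. */
	for (;;) {
		/* flush_signals(): throw away anything already pending */
		while (sigtimedwait(&set, NULL, &poll_now) > 0)
			;
		/* interruptible_sleep_on_timeout(): 5s, or early wakeup */
		if (sigtimedwait(&set, NULL, &timeout) > 0)
			printf("woken early\n");
		else
			printf("timed out\n");
	}
	return 0;
}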
......@@ -106,14 +106,48 @@ static inline void remove_suid(struct inode *inode)
}
}
static int ext2_get_block(struct inode *inode, unsigned long block, struct buffer_head *bh, int update)
{
if (!bh->b_blocknr) {
int error, created;
unsigned long blocknr;
blocknr = ext2_getblk_block(inode, block, 1, &error, &created);
if (!blocknr)
return error;
bh->b_dev = inode->i_dev;
bh->b_blocknr = blocknr;
if (!update)
return 0;
if (created) {
memset(bh->b_data, 0, bh->b_size);
set_bit(BH_Uptodate, &bh->b_state);
return 0;
}
}
if (!update)
return 0;
lock_kernel();
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
unlock_kernel();
return buffer_uptodate(bh) ? 0 : -EIO;
}
static int ext2_writepage (struct file * file, struct page * page)
{
return block_write_full_page(file, page, ext2_getblk_block);
return block_write_full_page(file, page, ext2_get_block);
}
static long ext2_write_one_page (struct file *file, struct page *page, unsigned long offset, unsigned long bytes, const char * buf)
{
return block_write_partial_page(file, page, offset, bytes, buf, ext2_getblk_block);
return block_write_partial_page(file, page, offset, bytes, buf, ext2_get_block);
}
/*
......
......@@ -174,6 +174,7 @@ int ext2_bmap (struct inode * inode, int block)
ret = block_bmap (bread (inode->i_dev, i,
inode->i_sb->s_blocksize),
block & (addr_per_block - 1));
goto out;
}
block -= (1 << (addr_per_block_bits * 2));
i = inode_bmap (inode, EXT2_TIND_BLOCK);
......
......@@ -555,7 +555,7 @@ static int empty_dir (struct inode * inode)
while (offset < inode->i_size ) {
if (!bh || (void *) de >= (void *) (bh->b_data + sb->s_blocksize)) {
brelse (bh);
bh = ext2_bread (inode, offset >> EXT2_BLOCK_SIZE_BITS(sb), 1, &err);
bh = ext2_bread (inode, offset >> EXT2_BLOCK_SIZE_BITS(sb), 0, &err);
if (!bh) {
#if 0
ext2_error (sb, "empty_dir",
......
#define NUMCARDS 1
#define NBDEVS 2
#define NUMCARDS 0
#define NBDEVS 0
struct board_info static_boards[NUMCARDS]={
{ ENABLED, 0, OFF, 2, (unchar*) 0x320, (unchar*) 0xd0000 },
};
/* DO NOT HAND EDIT THIS FILE! */
......@@ -870,7 +870,7 @@ extern struct buffer_head * breada(kdev_t, int, int, unsigned int, unsigned int)
extern int brw_page(int, struct page *, kdev_t, int [], int, int);
typedef long (*writepage_t)(struct file *, struct page *, unsigned long, unsigned long, const char *);
typedef int (*fs_getblock_t)(struct inode *, long, int, int *, int *);
typedef int (*fs_getblock_t)(struct inode *, unsigned long, struct buffer_head *, int);
/* Generic buffer handling for block filesystems.. */
extern int block_read_full_page(struct file *, struct page *);
......
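The fs_getblock_t change above is the heart of this commit: instead of returning a physical block number, the callback now receives the buffer_head itself plus an 'update' flag, fills in b_dev/b_blocknr, and, when 'update' is set, must leave the buffer contents up to date (reading from disk if the block already existed). ext2_get_block() earlier in this commit is the real implementation; a hypothetical minimal one for a filesystem whose file blocks map 1:1 onto device blocks might look like the sketch below (flatfs_get_block is an invented name, not from this commit).

static int flatfs_get_block(struct inode *inode, unsigned long block,
			    struct buffer_head *bh, int update)
{
	if (!bh->b_blocknr) {
		bh->b_dev = inode->i_dev;
		bh->b_blocknr = block;   /* 1:1 mapping, nothing to allocate */
	}
	if (!update)
		return 0;
	/* The caller will only partially fill the buffer: read the old
	 * contents first, as ext2_get_block() does above. */
	lock_kernel();
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	unlock_kernel();
	return buffer_uptodate(bh) ? 0 : -EIO;
}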
......@@ -148,7 +148,7 @@ typedef struct page {
#define PG_uptodate 3
#define PG_free_after 4
#define PG_decr_after 5
#define PG_swap_unlock_after 6
#define PG_free_swap_after 6
#define PG_DMA 7
#define PG_Slab 8
#define PG_swap_cache 9
......@@ -182,7 +182,7 @@ if (!test_and_clear_bit(PG_locked, &(page)->flags)) { \
#define PageReferenced(page) (test_bit(PG_referenced, &(page)->flags))
#define PageFreeAfter(page) (test_bit(PG_free_after, &(page)->flags))
#define PageDecrAfter(page) (test_bit(PG_decr_after, &(page)->flags))
#define PageSwapUnlockAfter(page) (test_bit(PG_swap_unlock_after, &(page)->flags))
#define PageSwapUnlockAfter(page) (test_bit(PG_free_swap_after, &(page)->flags))
#define PageDMA(page) (test_bit(PG_DMA, &(page)->flags))
#define PageSlab(page) (test_bit(PG_Slab, &(page)->flags))
#define PageSwapCache(page) (test_bit(PG_swap_cache, &(page)->flags))
......
......@@ -273,8 +273,8 @@ int shrink_mmap(int priority, int gfp_mask)
continue;
}
if (!page_count(page)) {
// BUG();
spin_unlock(&pagecache_lock);
BUG();
continue;
}
get_page(page);
......@@ -292,13 +292,18 @@ int shrink_mmap(int priority, int gfp_mask)
/* Is it a buffer page? */
if (page->buffers) {
int mem = page->inode ? 0 : PAGE_CACHE_SIZE;
spin_unlock(&pagecache_lock);
if (try_to_free_buffers(page))
goto made_progress;
if (!try_to_free_buffers(page))
goto unlock_continue;
buffermem -= mem;
spin_lock(&pagecache_lock);
}
/* We can't free pages unless there's just one user */
/*
* We can't free pages unless there's just one user
* (count == 2 because we added one ourselves above).
*/
if (page_count(page) != 2)
goto spin_unlock_continue;
......
......@@ -786,8 +786,11 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
*/
flags = vma->vm_flags;
addr = vma->vm_start;
lock_kernel(); /* kswapd, ugh */
insert_vm_struct(mm, vma);
merge_segments(mm, vma->vm_start, vma->vm_end);
unlock_kernel();
mm->total_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED) {
......
......@@ -147,8 +147,7 @@ static void rw_swap_page_base(int rw, unsigned long entry, struct page *page, in
atomic_inc(&nr_async_pages);
}
if (dolock) {
/* only lock/unlock swap cache pages! */
set_bit(PG_swap_unlock_after, &page->flags);
set_bit(PG_free_swap_after, &page->flags);
p->swap_map[offset]++;
}
set_bit(PG_free_after, &page->flags);
......@@ -176,15 +175,6 @@ static void rw_swap_page_base(int rw, unsigned long entry, struct page *page, in
#endif
}
/*
* This is run when asynchronous page I/O has completed.
* It decrements the swap bitmap counter
*/
void swap_after_unlock_page(unsigned long entry)
{
swap_free(entry);
}
/*
* A simple wrapper so the base function doesn't need to enforce
* that all swap pages go through the swap cache! We verify that:
......