Commit e61b17ab authored by Linus Torvalds

Import 2.3.9pre2

parent b34c35ce
@@ -234,9 +234,9 @@ L: linux-kernel@vger.rutgers.edu
S: Maintained
DIGI INTL. EPCA DRIVER
P: Daniel Taylor
P: Chad Schwartz
M: support@dgii.com
M: danielt@dgii.com
M: chads@dgii.com
L: digilnux@dgii.com
S: Maintained
......
@@ -366,7 +366,7 @@ __initfunc(static void ide_setup_pci_device (struct pci_dev *dev, ide_pci_device
byte tmp = 0;
ide_hwif_t *hwif, *mate = NULL;
#ifdef CONFIG_IDEDMA_AUTO
#ifdef CONFIG_IDEDMA_PCI_AUTO
autodma = 1;
#endif
check_if_enabled:
......
@@ -17,6 +17,7 @@
#include <linux/locks.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
#include <asm/io.h>
......
@@ -106,14 +106,48 @@ static inline void remove_suid(struct inode *inode)
}
}
static int ext2_get_block(struct inode *inode, unsigned long block, struct buffer_head *bh, int update)
{
if (!bh->b_blocknr) {
int error, created;
unsigned long blocknr;
blocknr = ext2_getblk_block(inode, block, 1, &error, &created);
if (!blocknr)
return error;
bh->b_dev = inode->i_dev;
bh->b_blocknr = blocknr;
if (!update)
return 0;
if (created) {
memset(bh->b_data, 0, bh->b_size);
set_bit(BH_Uptodate, &bh->b_state);
return 0;
}
}
if (!update)
return 0;
lock_kernel();
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
unlock_kernel();
return buffer_uptodate(bh) ? 0 : -EIO;
}
static int ext2_writepage (struct file * file, struct page * page)
{
return block_write_full_page(file, page, ext2_getblk_block);
return block_write_full_page(file, page, ext2_get_block);
}
static long ext2_write_one_page (struct file *file, struct page *page, unsigned long offset, unsigned long bytes, const char * buf)
{
return block_write_partial_page(file, page, offset, bytes, buf, ext2_getblk_block);
return block_write_partial_page(file, page, offset, bytes, buf, ext2_get_block);
}
/*
......
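The new ext2_get_block() above adopts the get_block callback shape this release introduces: rather than handing a block number back through error/created out-parameters (the older ext2_getblk_block() convention it still wraps internally), the filesystem fills a buffer_head with the device and physical block, and zeroes a freshly allocated block itself. A minimal userspace model of that contract, using simplified stand-in structures rather than the kernel's definitions:

```c
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 1024

/* Simplified stand-ins for the kernel structures involved. */
struct buffer_head {
	unsigned long b_blocknr;   /* physical block, 0 = not mapped yet */
	int           b_dev;       /* device the block lives on */
	char          b_data[BLOCK_SIZE];
	size_t        b_size;
};

struct inode {
	int           i_dev;
	unsigned long i_map[16];   /* toy logical -> physical block map */
};

/*
 * Model of the new get_block contract: map a logical file block to a
 * physical block, record it in the buffer_head, and zero the data if
 * the block was just "created" (allocated on demand).
 */
static int toy_get_block(struct inode *inode, unsigned long block,
			 struct buffer_head *bh, int update)
{
	int created = 0;

	if (block >= 16)
		return -1;                         /* out of range */

	if (!inode->i_map[block]) {                /* allocate on demand */
		inode->i_map[block] = 100 + block;
		created = 1;
	}

	bh->b_dev = inode->i_dev;
	bh->b_blocknr = inode->i_map[block];
	bh->b_size = BLOCK_SIZE;

	if (update && created)
		memset(bh->b_data, 0, bh->b_size); /* fresh block reads as zeroes */
	return 0;
}

int main(void)
{
	struct inode ino = { .i_dev = 3 };
	struct buffer_head bh = { 0 };

	if (toy_get_block(&ino, 5, &bh, 1) == 0)
		printf("logical 5 -> dev %d block %lu\n", bh.b_dev, bh.b_blocknr);
	return 0;
}
```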
@@ -174,6 +174,7 @@ int ext2_bmap (struct inode * inode, int block)
ret = block_bmap (bread (inode->i_dev, i,
inode->i_sb->s_blocksize),
block & (addr_per_block - 1));
goto out;
}
block -= (1 << (addr_per_block_bits * 2));
i = inode_bmap (inode, EXT2_TIND_BLOCK);
......
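The added goto out; closes a fall-through in ext2_bmap(): once a block has been resolved through the double-indirect path, control would otherwise continue into the triple-indirect code below and overwrite the result. A compressed, purely illustrative sketch of the control-flow shape being fixed:

```c
#include <stdio.h>

/* Illustrative only: the branch structure of a bmap-style lookup. */
static int toy_bmap(int block)
{
	int ret = 0;

	if (block < 100) {              /* "direct / indirect" range */
		ret = 1000 + block;
		goto out;
	}
	if (block < 10000) {            /* "double indirect" range */
		ret = 2000 + block;
		goto out;               /* the fix: without this, execution
					 * would continue below and
					 * overwrite ret */
	}
	ret = 3000 + block;             /* "triple indirect" range */
out:
	return ret;
}

int main(void)
{
	printf("%d\n", toy_bmap(500));  /* prints 2500, not 3500 */
	return 0;
}
```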
@@ -555,7 +555,7 @@ static int empty_dir (struct inode * inode)
while (offset < inode->i_size ) {
if (!bh || (void *) de >= (void *) (bh->b_data + sb->s_blocksize)) {
brelse (bh);
bh = ext2_bread (inode, offset >> EXT2_BLOCK_SIZE_BITS(sb), 1, &err);
bh = ext2_bread (inode, offset >> EXT2_BLOCK_SIZE_BITS(sb), 0, &err);
if (!bh) {
#if 0
ext2_error (sb, "empty_dir",
......
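The third argument of ext2_bread() is its create flag; switching it from 1 to 0 means empty_dir() only reads directory blocks that already exist instead of allocating new ones while merely checking whether the directory is empty. A toy model of that create-flag distinction, with names and behaviour simplified for illustration:

```c
#include <stdio.h>

static int allocated[8];                /* toy block map: 0 = hole */

/* Model of a bread-style helper: return a block number, or 0 on a hole.
 * With create != 0 a hole is filled in; with create == 0 it is left alone. */
static int toy_bread(int block, int create)
{
	if (!allocated[block]) {
		if (!create)
			return 0;           /* just report the hole */
		allocated[block] = 1;       /* side effect we want to avoid
					     * in a read-only check */
	}
	return 100 + block;
}

int main(void)
{
	allocated[0] = 1;
	/* A read-only scan (create == 0) no longer allocates block 1. */
	printf("block 0: %d, block 1: %d\n", toy_bread(0, 0), toy_bread(1, 0));
	return 0;
}
```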
#define NUMCARDS 1
#define NBDEVS 2
#define NUMCARDS 0
#define NBDEVS 0
struct board_info static_boards[NUMCARDS]={
{ ENABLED, 0, OFF, 2, (unchar*) 0x320, (unchar*) 0xd0000 },
};
/* DO NOT HAND EDIT THIS FILE! */
@@ -870,7 +870,7 @@ extern struct buffer_head * breada(kdev_t, int, int, unsigned int, unsigned int)
extern int brw_page(int, struct page *, kdev_t, int [], int, int);
typedef long (*writepage_t)(struct file *, struct page *, unsigned long, unsigned long, const char *);
typedef int (*fs_getblock_t)(struct inode *, long, int, int *, int *);
typedef int (*fs_getblock_t)(struct inode *, unsigned long, struct buffer_head *, int);
/* Generic buffer handling for block filesystems.. */
extern int block_read_full_page(struct file *, struct page *);
......
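The fs_getblock_t typedef changes to match callbacks like ext2_get_block above: the callback now receives a buffer_head to fill plus an update flag, instead of the old argument list of a block number, a create flag, and two int out-parameters. A sketch of how a generic page-level helper might drive such a callback; the helper itself and the stand-in types are invented for illustration and are not the kernel's block_read_full_page():

```c
#include <stdio.h>

struct inode;                                  /* opaque here */
struct buffer_head { unsigned long b_blocknr; int b_dev; };

/* New-style callback: fill the buffer_head, return 0 or an error. */
typedef int (*fs_getblock_t)(struct inode *, unsigned long,
			     struct buffer_head *, int);

/* Generic caller: map each block of a "page" and submit I/O for it. */
static int read_blocks(struct inode *inode, unsigned long first,
		       int nr, fs_getblock_t get_block)
{
	struct buffer_head bh;
	int i, err;

	for (i = 0; i < nr; i++) {
		err = get_block(inode, first + i, &bh, 1);
		if (err)
			return err;
		printf("submit read: dev %d block %lu\n", bh.b_dev, bh.b_blocknr);
	}
	return 0;
}

/* Trivial 1:1 mapping, standing in for a real filesystem callback. */
static int identity_get_block(struct inode *inode, unsigned long block,
			      struct buffer_head *bh, int update)
{
	(void)inode; (void)update;
	bh->b_dev = 1;
	bh->b_blocknr = block;
	return 0;
}

int main(void)
{
	return read_blocks(NULL, 10, 4, identity_get_block);
}
```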
@@ -148,7 +148,7 @@ typedef struct page {
#define PG_uptodate 3
#define PG_free_after 4
#define PG_decr_after 5
#define PG_swap_unlock_after 6
#define PG_free_swap_after 6
#define PG_DMA 7
#define PG_Slab 8
#define PG_swap_cache 9
@@ -182,7 +182,7 @@ if (!test_and_clear_bit(PG_locked, &(page)->flags)) { \
#define PageReferenced(page) (test_bit(PG_referenced, &(page)->flags))
#define PageFreeAfter(page) (test_bit(PG_free_after, &(page)->flags))
#define PageDecrAfter(page) (test_bit(PG_decr_after, &(page)->flags))
#define PageSwapUnlockAfter(page) (test_bit(PG_swap_unlock_after, &(page)->flags))
#define PageSwapUnlockAfter(page) (test_bit(PG_free_swap_after, &(page)->flags))
#define PageDMA(page) (test_bit(PG_DMA, &(page)->flags))
#define PageSlab(page) (test_bit(PG_Slab, &(page)->flags))
#define PageSwapCache(page) (test_bit(PG_swap_cache, &(page)->flags))
......
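PG_swap_unlock_after is renamed to PG_free_swap_after while keeping bit 6 and the PageSwapUnlockAfter() accessor, so only the bit's meaning changes (free the swap entry once I/O completes; the bit is set in rw_swap_page_base further down in this commit). A tiny model of the flag-word idiom with plain C bit operations, where set_bit()/test_bit() are non-atomic stand-ins for the kernel's versions:

```c
#include <stdio.h>

#define PG_free_after      4
#define PG_free_swap_after 6            /* renamed bit, same position */

/* Non-atomic stand-ins for the kernel's set_bit()/test_bit(). */
static void set_bit(int nr, unsigned long *addr)  { *addr |= 1UL << nr; }
static int  test_bit(int nr, unsigned long *addr) { return (*addr >> nr) & 1; }

#define PageFreeAfter(flags)     test_bit(PG_free_after, (flags))
#define PageFreeSwapAfter(flags) test_bit(PG_free_swap_after, (flags))

int main(void)
{
	unsigned long flags = 0;

	set_bit(PG_free_swap_after, &flags);
	printf("free_after=%d free_swap_after=%d\n",
	       PageFreeAfter(&flags), PageFreeSwapAfter(&flags));
	return 0;
}
```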
@@ -273,8 +273,8 @@ int shrink_mmap(int priority, int gfp_mask)
continue;
}
if (!page_count(page)) {
// BUG();
spin_unlock(&pagecache_lock);
BUG();
continue;
}
get_page(page);
@@ -292,13 +292,18 @@ int shrink_mmap(int priority, int gfp_mask)
/* Is it a buffer page? */
if (page->buffers) {
int mem = page->inode ? 0 : PAGE_CACHE_SIZE;
spin_unlock(&pagecache_lock);
if (try_to_free_buffers(page))
goto made_progress;
if (!try_to_free_buffers(page))
goto unlock_continue;
buffermem -= mem;
spin_lock(&pagecache_lock);
}
/* We can't free pages unless there's just one user */
/*
* We can't free pages unless there's just one user
* (count == 2 because we added one ourselves above).
*/
if (page_count(page) != 2)
goto spin_unlock_continue;
......
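The expanded comment records why the test is against 2 rather than 1: shrink_mmap() took its own reference with get_page() a few lines earlier, so a page whose only other holder is the page cache shows a count of exactly 2 here. A small model of that counting argument, with plain ints standing in for the kernel's atomic page counts:

```c
#include <stdio.h>

struct page { int count; };                   /* page cache holds 1 ref */

static void get_page(struct page *p) { p->count++; }
static void put_page(struct page *p) { p->count--; }

/* Can the scanner free this page? Only if, besides the page cache's
 * reference and the one the scanner just took, nobody else holds it. */
static int can_free(struct page *p)
{
	return p->count == 2;
}

int main(void)
{
	struct page idle   = { .count = 1 };  /* only the page cache */
	struct page mapped = { .count = 3 };  /* page cache + two users */

	get_page(&idle);    /* scanner pins it: 1 -> 2 */
	get_page(&mapped);  /* scanner pins it: 3 -> 4 */

	printf("idle: %s, mapped: %s\n",
	       can_free(&idle)   ? "freeable" : "busy",
	       can_free(&mapped) ? "freeable" : "busy");

	put_page(&idle);
	put_page(&mapped);
	return 0;
}
```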
@@ -786,8 +786,11 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
*/
flags = vma->vm_flags;
addr = vma->vm_start;
lock_kernel(); /* kswapd, ugh */
insert_vm_struct(mm, vma);
merge_segments(mm, vma->vm_start, vma->vm_end);
unlock_kernel();
mm->total_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED) {
......
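do_brk() now brackets insert_vm_struct() and merge_segments() with the big kernel lock; the added comment points at kswapd, which can be poking at the same mm concurrently. A minimal model of that bracketing pattern, with a pthread mutex standing in for lock_kernel()/unlock_kernel() and an invented toy "vma list":

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kernel_lock = PTHREAD_MUTEX_INITIALIZER;

/* Toy "vma list": just a count of mapped regions. */
static int nr_vmas;

static void insert_vm_struct(void) { nr_vmas++; }
static void merge_segments(void)   { /* would coalesce neighbours here */ }

/* The do_brk-style update: both steps happen under one lock so a
 * concurrent scanner (kswapd in the real kernel) never sees the list
 * half-updated. */
static void grow_brk(void)
{
	pthread_mutex_lock(&kernel_lock);     /* lock_kernel() stand-in */
	insert_vm_struct();
	merge_segments();
	pthread_mutex_unlock(&kernel_lock);   /* unlock_kernel() stand-in */
}

int main(void)
{
	grow_brk();
	printf("vmas: %d\n", nr_vmas);
	return 0;
}
```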
@@ -147,8 +147,7 @@ static void rw_swap_page_base(int rw, unsigned long entry, struct page *page, in
atomic_inc(&nr_async_pages);
}
if (dolock) {
/* only lock/unlock swap cache pages! */
set_bit(PG_swap_unlock_after, &page->flags);
set_bit(PG_free_swap_after, &page->flags);
p->swap_map[offset]++;
}
set_bit(PG_free_after, &page->flags);
@@ -176,15 +175,6 @@ static void rw_swap_page_base(int rw, unsigned long entry, struct page *page, in
#endif
}
/*
* This is run when asynchronous page I/O has completed.
* It decrements the swap bitmap counter
*/
void swap_after_unlock_page(unsigned long entry)
{
swap_free(entry);
}
/*
* A simple wrapper so the base function doesn't need to enforce
* that all swap pages go through the swap cache! We verify that:
......
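With the separate swap_after_unlock_page() helper removed, rw_swap_page_base() simply bumps the swap map count and sets PG_free_swap_after, leaving the generic end-of-I/O path to drop that swap reference when it sees the flag, much as PG_free_after already asks it to release the page. A toy model of that flag-driven completion under those assumptions; the completion routine and field names here are invented for illustration:

```c
#include <stdio.h>

#define PG_free_after      (1u << 4)
#define PG_free_swap_after (1u << 6)

/* Toy swap map: reference counts per swap slot. */
static int swap_map[8];

struct page {
	unsigned int  flags;
	unsigned long swap_entry;   /* which swap slot the page belongs to */
};

static void swap_free(unsigned long entry) { swap_map[entry]--; }

/* Invented completion hook: what runs when async swap I/O finishes. */
static void end_swap_io(struct page *p)
{
	if (p->flags & PG_free_swap_after)
		swap_free(p->swap_entry);   /* drop the extra swap reference */
	if (p->flags & PG_free_after)
		printf("release page memory\n");
}

int main(void)
{
	struct page p = { .flags = 0, .swap_entry = 3 };

	/* Submission side, as in rw_swap_page_base: take a reference and
	 * ask the completion path to drop it. */
	swap_map[p.swap_entry]++;
	p.flags |= PG_free_swap_after | PG_free_after;

	end_swap_io(&p);
	printf("swap_map[3] = %d\n", swap_map[p.swap_entry]);
	return 0;
}
```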