diff --git a/fs/ext2/truncate.c b/fs/ext2/truncate.c
index 859d5a9ad565dc89f6980208697078878b6ee95c..653936278dfc64145fa76bc4c42400759830fe35 100644
--- a/fs/ext2/truncate.c
+++ b/fs/ext2/truncate.c
@@ -50,8 +50,7 @@ static int ext2_secrm_seed = 152;	/* Random generator base */
  * there's no need to test for changes during the operation.
  */
 #define DIRECT_BLOCK(inode) \
-	((inode->i_size + inode->i_sb->s_blocksize - 1) / \
-			  inode->i_sb->s_blocksize)
+	((unsigned long) ((inode->i_size + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits))
 #define INDIRECT_BLOCK(inode,offset) ((int)DIRECT_BLOCK(inode) - offset)
 #define DINDIRECT_BLOCK(inode,offset) \
 	(INDIRECT_BLOCK(inode,offset) / addr_per_block)
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 8001c5186e3c1abc857011a444ced5162d2d36cc..8da9396ccbcd98d24f24018b9d3d4ddefcbfe359 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1009,7 +1009,7 @@ int isofs_get_block(struct inode *inode, long iblock,
 
 abort_beyond_end:
 	printk("_isofs_bmap: block >= EOF (%ld, %ld)\n",
-	       iblock, inode->i_size);
+	       iblock, (unsigned long) inode->i_size);
 	goto abort;
 
 abort_too_many_sections:
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index ab1e514857e27d93d451ad795212251459cc7ea3..00b310e065b51cf70ba9b9504955de83edbd52a3 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -680,7 +680,7 @@ printk("nfs_notify_change: revalidate failed, error=%d\n", error);
 	 */
 	if (attr->ia_valid & ATTR_SIZE) {
 		if (attr->ia_size != fattr.size)
-			printk("nfs_notify_change: attr=%ld, fattr=%d??\n",
+			printk("nfs_notify_change: attr=%Ld, fattr=%d??\n",
 				attr->ia_size, fattr.size);
 		inode->i_size  = attr->ia_size;
 		inode->i_mtime = fattr.mtime.seconds;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d3aa602c9917ea4e7c0b080f84d52d1fc6ed8d96..0567e080ead5426ee0a15072c0514eabb50e0bcb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -83,12 +83,9 @@ extern int * max_segments[MAX_BLKDEV];
 #define MAX_SEGMENTS MAX_SECTORS
 
 #define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)
-#if 0  /* small readahead */
-#define MAX_READAHEAD PageAlignSize(4096*7)
-#define MIN_READAHEAD PageAlignSize(4096*2)
-#else /* large readahead */
-#define MAX_READAHEAD PageAlignSize(4096*31)
-#define MIN_READAHEAD PageAlignSize(4096*3)
-#endif
+
+/* read-ahead in pages.. */
+#define MAX_READAHEAD	31
+#define MIN_READAHEAD	3
 
 #endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c6ba53f15dcea0507d70693d235ff5e82506ebfe..4b1fadbf996ae21080cc0f1e7a27cf6efde39e8b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -302,7 +302,7 @@ struct iattr {
 	umode_t		ia_mode;
 	uid_t		ia_uid;
 	gid_t		ia_gid;
-	off_t		ia_size;
+	loff_t		ia_size;
 	time_t		ia_atime;
 	time_t		ia_mtime;
 	time_t		ia_ctime;
@@ -347,7 +347,7 @@ struct inode {
 	uid_t			i_uid;
 	gid_t			i_gid;
 	kdev_t			i_rdev;
-	off_t			i_size;
+	loff_t			i_size;
 	time_t			i_atime;
 	time_t			i_mtime;
 	time_t			i_ctime;
diff --git a/ipc/shm.c b/ipc/shm.c
index db5081451175fb0dbdc838e9650235129faac35b..05d863330761a33a05af2d20de0f55dac8376135 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -208,14 +208,12 @@ static int newseg (key_t key, int shmflg, size_t size)
 	int id, err;
 	unsigned int shmall, shmmni;
 
-	lock_kernel();
 	shmall = shm_prm[1];
 	shmmni = shm_prm[2];
 	if (shmmni > IPCMNI) {
 		printk ("shmmni reset to max of %u\n", IPCMNI);
 		shmmni = shm_prm[2] = IPCMNI;
 	}
-	unlock_kernel();
 
 	if (shmmni < used_segs)
 		return -ENOSPC;
@@ -282,10 +280,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
 	int err, id = 0;
 	size_t shmmax;
 
-	lock_kernel();
 	shmmax = shm_prm[0];
-	unlock_kernel();
-
 	if (size > shmmax)
 		return -EINVAL;
 
@@ -387,11 +382,11 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
 		err = -EFAULT;
 		if (!buf)
 			goto out;
-		lock_kernel();
+
 		shminfo.shmmni = shminfo.shmseg = shm_prm[2];
 		shminfo.shmmax = shm_prm[0];
 		shminfo.shmall = shm_prm[1];
-		unlock_kernel();
+
 		shminfo.shmmin = SHMMIN;
 		if(copy_to_user (buf, &shminfo, sizeof(struct shminfo)))
 			goto out_unlocked;
diff --git a/mm/filemap.c b/mm/filemap.c
index 5a61e47ba9e2aea04268e081076959019d95aee1..39c024c259bb876a9c99903b59f1b61f78c043bb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -567,7 +567,7 @@ static int read_cluster_nonblocking(struct file * file, unsigned long offset)
 			break;
 	}
 
-	return;
+	return error;
 }
 
 /* 
@@ -837,13 +837,14 @@ static inline int get_max_readahead(struct inode * inode)
 
 static void generic_file_readahead(int reada_ok,
 	struct file * filp, struct inode * inode,
-	unsigned long ppos, struct page * page)
+	struct page * page)
 {
+	unsigned long index = page->index;
 	unsigned long max_ahead, ahead;
 	unsigned long raend;
 	int max_readahead = get_max_readahead(inode);
 
-	raend = filp->f_raend & PAGE_CACHE_MASK;
+	raend = filp->f_raend;
 	max_ahead = 0;
 
 /*
@@ -855,14 +856,14 @@ static void generic_file_readahead(int reada_ok,
  * page only.
  */
 	if (PageLocked(page)) {
-		if (!filp->f_ralen || ppos >= raend || ppos + filp->f_ralen < raend) {
-			raend = ppos;
-			if (raend < inode->i_size)
+		if (!filp->f_ralen || index >= raend || index + filp->f_ralen < raend) {
+			raend = index;
+			if (raend < (unsigned long) (inode->i_size >> PAGE_CACHE_SHIFT))
 				max_ahead = filp->f_ramax;
 			filp->f_rawin = 0;
 			filp->f_ralen = PAGE_CACHE_SIZE;
 			if (!max_ahead) {
-				filp->f_raend  = ppos + filp->f_ralen;
+				filp->f_raend  = index + filp->f_ralen;
 				filp->f_rawin += filp->f_ralen;
 			}
 		}
@@ -876,7 +877,7 @@ static void generic_file_readahead(int reada_ok,
  * We will later force unplug device in order to force asynchronous read IO.
  */
 	else if (reada_ok && filp->f_ramax && raend >= PAGE_CACHE_SIZE &&
-		 ppos <= raend && ppos + filp->f_ralen >= raend) {
+		 index <= raend && index + filp->f_ralen >= raend) {
 /*
  * Add ONE page to max_ahead in order to try to have about the same IO max size
  * as synchronous read-ahead (MAX_READAHEAD + 1)*PAGE_CACHE_SIZE.
@@ -952,17 +953,16 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
 {
 	struct dentry *dentry = filp->f_dentry;
 	struct inode *inode = dentry->d_inode;
-	unsigned long pos, pgpos;
+	unsigned long index, offset;
 	struct page *cached_page;
 	int reada_ok;
 	int error;
 	int max_readahead = get_max_readahead(inode);
-	unsigned long pgoff;
 
 	cached_page = NULL;
-	pos = *ppos;
-	pgpos = pos & PAGE_CACHE_MASK;
-	pgoff = pos >> PAGE_CACHE_SHIFT;
+	index = *ppos >> PAGE_CACHE_SHIFT;
+	offset = *ppos & ~PAGE_CACHE_MASK;
+
 /*
  * If the current position is outside the previous read-ahead window, 
  * we reset the current read-ahead context and set read ahead max to zero
@@ -970,7 +970,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
  * otherwise, we assume that the file accesses are sequential enough to
  * continue read-ahead.
  */
-	if (pgpos > filp->f_raend || pgpos + filp->f_rawin < filp->f_raend) {
+	if (index > filp->f_raend || index + filp->f_rawin < filp->f_raend) {
 		reada_ok = 0;
 		filp->f_raend = 0;
 		filp->f_ralen = 0;
@@ -986,12 +986,12 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
  * Then, at least MIN_READAHEAD if read ahead is ok,
  * and at most MAX_READAHEAD in all cases.
  */
-	if (pos + desc->count <= (PAGE_CACHE_SIZE >> 1)) {
+	if (!index && offset + desc->count <= (PAGE_CACHE_SIZE >> 1)) {
 		filp->f_ramax = 0;
 	} else {
 		unsigned long needed;
 
-		needed = ((pos + desc->count) & PAGE_CACHE_MASK) - pgpos;
+		needed = ((offset + desc->count) >> PAGE_CACHE_SHIFT) + 1;
 
 		if (filp->f_ramax < needed)
 			filp->f_ramax = needed;
@@ -1004,17 +1004,27 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
 
 	for (;;) {
 		struct page *page, **hash;
+		unsigned long end_index, nr;
 
-		if (pos >= inode->i_size)
+		end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+		if (index > end_index)
 			break;
+		nr = PAGE_CACHE_SIZE;
+		if (index == end_index) {
+			nr = inode->i_size & ~PAGE_CACHE_MASK;
+			if (nr <= offset)
+				break;
+		}
+
+		nr = nr - offset;
 
 		/*
 		 * Try to find the data in the page cache..
 		 */
-		hash = page_hash(&inode->i_data, pgoff);
+		hash = page_hash(&inode->i_data, index);
 
 		spin_lock(&pagecache_lock);
-		page = __find_page_nolock(&inode->i_data, pgoff, *hash);
+		page = __find_page_nolock(&inode->i_data, index, *hash);
 		if (!page)
 			goto no_cached_page;
 found_page:
@@ -1024,19 +1034,10 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
 		if (!Page_Uptodate(page))
 			goto page_not_up_to_date;
 page_ok:
-	/*
-	 * Ok, we have the page, and it's up-to-date, so
-	 * now we can copy it to user space...
-	 */
-	{
-		unsigned long offset, nr;
-
-		offset = pos & ~PAGE_CACHE_MASK;
-		nr = PAGE_CACHE_SIZE - offset;
-		if (nr > inode->i_size - pos)
-			nr = inode->i_size - pos;
-
 		/*
+		 * Ok, we have the page, and it's up-to-date, so
+		 * now we can copy it to user space...
+		 *
 		 * The actor routine returns how many bytes were actually used..
 		 * NOTE! This may not be the same as how much of a user buffer
 		 * we filled up (we may be padding etc), so we can only update
@@ -1044,20 +1045,20 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
 		 * pointers and the remaining count).
 		 */
 		nr = actor(desc, page, offset, nr);
-		pos += nr;
-		pgoff = pos >> PAGE_CACHE_SHIFT;
+		offset += nr;
+		index += offset >> PAGE_CACHE_SHIFT;
+		offset &= ~PAGE_CACHE_MASK;
+
 		page_cache_release(page);
 		if (nr && desc->count)
 			continue;
 		break;
-	}
 
 /*
  * Ok, the page was not immediately readable, so let's try to read ahead while we're at it..
  */
 page_not_up_to_date:
-		generic_file_readahead(reada_ok, filp, inode,
-					pos & PAGE_CACHE_MASK, page);
+		generic_file_readahead(reada_ok, filp, inode, page);
 
 		if (Page_Uptodate(page))
 			goto page_ok;
@@ -1078,8 +1079,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
 				goto page_ok;
 
 			/* Again, try some read-ahead while waiting for the page to finish.. */
-			generic_file_readahead(reada_ok, filp, inode,
-						pos & PAGE_CACHE_MASK, page);
+			generic_file_readahead(reada_ok, filp, inode, page);
 			wait_on_page(page);
 			if (Page_Uptodate(page))
 				goto page_ok;
@@ -1111,7 +1111,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
 			 * dropped the page cache lock. Check for that.
 			 */
 			spin_lock(&pagecache_lock);
-			page = __find_page_nolock(&inode->i_data, pgoff, *hash);
+			page = __find_page_nolock(&inode->i_data, index, *hash);
 			if (page)
 				goto found_page;
 		}
@@ -1120,14 +1120,14 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
 		 * Ok, add the new page to the hash-queues...
 		 */
 		page = cached_page;
-		__add_to_page_cache(page, &inode->i_data, pgoff, hash);
+		__add_to_page_cache(page, &inode->i_data, index, hash);
 		spin_unlock(&pagecache_lock);
 		cached_page = NULL;
 
 		goto readpage;
 	}
 
-	*ppos = pos;
+	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
 	filp->f_reada = 1;
 	if (cached_page)
 		page_cache_free(cached_page);