Commit 67a670bd authored by Linus Torvalds

Merge http://linux-isdn.bkbits.net/linux-2.5.make-next

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 8876c643 66ebd50b
...@@ -436,14 +436,15 @@ static int alloc_array_sb(mddev_t * mddev) ...@@ -436,14 +436,15 @@ static int alloc_array_sb(mddev_t * mddev)
static int alloc_disk_sb(mdk_rdev_t * rdev) static int alloc_disk_sb(mdk_rdev_t * rdev)
{ {
if (rdev->sb) if (rdev->sb_page)
MD_BUG(); MD_BUG();
rdev->sb = (mdp_super_t *) __get_free_page(GFP_KERNEL); rdev->sb_page = alloc_page(GFP_KERNEL);
if (!rdev->sb) { if (!rdev->sb_page) {
printk(OUT_OF_MEM); printk(OUT_OF_MEM);
return -EINVAL; return -EINVAL;
} }
rdev->sb = (mdp_super_t *) page_address(rdev->sb_page);
clear_page(rdev->sb); clear_page(rdev->sb);
return 0; return 0;
...@@ -451,9 +452,10 @@ static int alloc_disk_sb(mdk_rdev_t * rdev) ...@@ -451,9 +452,10 @@ static int alloc_disk_sb(mdk_rdev_t * rdev)
static void free_disk_sb(mdk_rdev_t * rdev) static void free_disk_sb(mdk_rdev_t * rdev)
{ {
if (rdev->sb) { if (rdev->sb_page) {
free_page((unsigned long) rdev->sb); page_cache_release(rdev->sb_page);
rdev->sb = NULL; rdev->sb = NULL;
rdev->sb_page = NULL;
rdev->sb_offset = 0; rdev->sb_offset = 0;
rdev->size = 0; rdev->size = 0;
} else { } else {
...@@ -462,13 +464,42 @@ static void free_disk_sb(mdk_rdev_t * rdev) ...@@ -462,13 +464,42 @@ static void free_disk_sb(mdk_rdev_t * rdev)
} }
} }
static void bi_complete(struct bio *bio)
{
complete((struct completion*)bio->bi_private);
}
static int sync_page_io(struct block_device *bdev, sector_t sector, int size,
struct page *page, int rw)
{
struct bio bio;
struct bio_vec vec;
struct completion event;
bio_init(&bio);
bio.bi_io_vec = &vec;
vec.bv_page = page;
vec.bv_len = size;
vec.bv_offset = 0;
bio.bi_vcnt = 1;
bio.bi_idx = 0;
bio.bi_size = size;
bio.bi_bdev = bdev;
bio.bi_sector = sector;
init_completion(&event);
bio.bi_private = &event;
bio.bi_end_io = bi_complete;
submit_bio(rw, &bio);
run_task_queue(&tq_disk);
wait_for_completion(&event);
return test_bit(BIO_UPTODATE, &bio.bi_flags);
}
static int read_disk_sb(mdk_rdev_t * rdev) static int read_disk_sb(mdk_rdev_t * rdev)
{ {
struct address_space *mapping = rdev->bdev->bd_inode->i_mapping;
struct page *page;
char *p;
unsigned long sb_offset; unsigned long sb_offset;
int n = PAGE_CACHE_SIZE / BLOCK_SIZE;
if (!rdev->sb) { if (!rdev->sb) {
MD_BUG(); MD_BUG();
...@@ -483,24 +514,14 @@ static int read_disk_sb(mdk_rdev_t * rdev) ...@@ -483,24 +514,14 @@ static int read_disk_sb(mdk_rdev_t * rdev)
*/ */
sb_offset = calc_dev_sboffset(rdev->dev, rdev->mddev, 1); sb_offset = calc_dev_sboffset(rdev->dev, rdev->mddev, 1);
rdev->sb_offset = sb_offset; rdev->sb_offset = sb_offset;
page = read_cache_page(mapping, sb_offset/n,
(filler_t *)mapping->a_ops->readpage, NULL); if (!sync_page_io(rdev->bdev, sb_offset<<1, MD_SB_BYTES, rdev->sb_page, READ))
if (IS_ERR(page))
goto out;
wait_on_page_locked(page);
if (!PageUptodate(page))
goto fail;
if (PageError(page))
goto fail; goto fail;
p = (char *)page_address(page) + BLOCK_SIZE * (sb_offset % n);
memcpy((char*)rdev->sb, p, MD_SB_BYTES);
page_cache_release(page);
printk(KERN_INFO " [events: %08lx]\n", (unsigned long)rdev->sb->events_lo); printk(KERN_INFO " [events: %08lx]\n", (unsigned long)rdev->sb->events_lo);
return 0; return 0;
fail: fail:
page_cache_release(page);
out:
printk(NO_SB,partition_name(rdev->dev)); printk(NO_SB,partition_name(rdev->dev));
return -EINVAL; return -EINVAL;
} }
...@@ -893,11 +914,6 @@ static mdk_rdev_t * find_rdev_all(kdev_t dev) ...@@ -893,11 +914,6 @@ static mdk_rdev_t * find_rdev_all(kdev_t dev)
static int write_disk_sb(mdk_rdev_t * rdev) static int write_disk_sb(mdk_rdev_t * rdev)
{ {
struct block_device *bdev = rdev->bdev;
struct address_space *mapping = bdev->bd_inode->i_mapping;
struct page *page;
unsigned offs;
int error;
kdev_t dev = rdev->dev; kdev_t dev = rdev->dev;
unsigned long sb_offset, size; unsigned long sb_offset, size;
...@@ -933,29 +949,11 @@ static int write_disk_sb(mdk_rdev_t * rdev) ...@@ -933,29 +949,11 @@ static int write_disk_sb(mdk_rdev_t * rdev)
} }
printk(KERN_INFO "(write) %s's sb offset: %ld\n", partition_name(dev), sb_offset); printk(KERN_INFO "(write) %s's sb offset: %ld\n", partition_name(dev), sb_offset);
fsync_bdev(bdev);
page = grab_cache_page(mapping, sb_offset/(PAGE_CACHE_SIZE/BLOCK_SIZE)); if (!sync_page_io(rdev->bdev, sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE))
offs = sb_offset % (PAGE_CACHE_SIZE/BLOCK_SIZE);
if (!page)
goto fail; goto fail;
error = mapping->a_ops->prepare_write(NULL, page, offs,
offs + MD_SB_BYTES);
if (error)
goto unlock;
memcpy((char *)page_address(page) + offs, rdev->sb, MD_SB_BYTES);
error = mapping->a_ops->commit_write(NULL, page, offs,
offs + MD_SB_BYTES);
if (error)
goto unlock;
unlock_page(page);
wait_on_page_locked(page);
page_cache_release(page);
fsync_bdev(bdev);
skip: skip:
return 0; return 0;
unlock:
unlock_page(page);
page_cache_release(page);
fail: fail:
printk("md: write_disk_sb failed for device %s\n", partition_name(dev)); printk("md: write_disk_sb failed for device %s\n", partition_name(dev));
return 1; return 1;
......
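Reviewer note: the md.c hunks above replace the page-cache reads and writes of the RAID superblock with the new sync_page_io() helper, which builds a bio and bio_vec on the stack, attaches a completion, submits the bio and waits for bi_end_io to fire. Note the unit conversion at the call sites: sb_offset is kept in 1K blocks while sync_page_io() takes 512-byte sectors, hence the "<< 1". Below is a minimal sketch of a read through the new helper; it is not part of this commit, and everything except sync_page_io(), alloc_page(), page_address() and page_cache_release() is illustrative.

/* Sketch only: read an md superblock with the new helper.  sb_offset is in
 * 1K blocks; sync_page_io() wants 512-byte sectors, hence the "<< 1". */
static int example_read_sb(struct block_device *bdev, unsigned long sb_offset,
			   struct page **pagep)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;
	if (!sync_page_io(bdev, sb_offset << 1, MD_SB_BYTES, page, READ)) {
		page_cache_release(page);
		return -EIO;
	}
	*pagep = page;	/* caller reads the superblock via page_address(page) */
	return 0;
}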
This diff is collapsed.
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#define BH_TRACE 0 #define BH_TRACE 0
#include <linux/module.h> #include <linux/module.h>
#include <linux/raid/md.h> #include <linux/raid/md.h>
#include <linux/raid/md_compatible.h>
#include <linux/raid/xor.h> #include <linux/raid/xor.h>
#include <asm/xor.h> #include <asm/xor.h>
...@@ -27,31 +26,30 @@ ...@@ -27,31 +26,30 @@
static struct xor_block_template *active_template; static struct xor_block_template *active_template;
void void
xor_block(unsigned int count, struct buffer_head **bh_ptr) xor_block(unsigned int count, unsigned int bytes, void **ptr)
{ {
unsigned long *p0, *p1, *p2, *p3, *p4; unsigned long *p0, *p1, *p2, *p3, *p4;
unsigned long bytes = bh_ptr[0]->b_size;
p0 = (unsigned long *) bh_ptr[0]->b_data; p0 = (unsigned long *) ptr[0];
p1 = (unsigned long *) bh_ptr[1]->b_data; p1 = (unsigned long *) ptr[1];
if (count == 2) { if (count == 2) {
active_template->do_2(bytes, p0, p1); active_template->do_2(bytes, p0, p1);
return; return;
} }
p2 = (unsigned long *) bh_ptr[2]->b_data; p2 = (unsigned long *) ptr[2];
if (count == 3) { if (count == 3) {
active_template->do_3(bytes, p0, p1, p2); active_template->do_3(bytes, p0, p1, p2);
return; return;
} }
p3 = (unsigned long *) bh_ptr[3]->b_data; p3 = (unsigned long *) ptr[3];
if (count == 4) { if (count == 4) {
active_template->do_4(bytes, p0, p1, p2, p3); active_template->do_4(bytes, p0, p1, p2, p3);
return; return;
} }
p4 = (unsigned long *) bh_ptr[4]->b_data; p4 = (unsigned long *) ptr[4];
active_template->do_5(bytes, p0, p1, p2, p3, p4); active_template->do_5(bytes, p0, p1, p2, p3, p4);
} }
...@@ -103,7 +101,7 @@ calibrate_xor_block(void) ...@@ -103,7 +101,7 @@ calibrate_xor_block(void)
void *b1, *b2; void *b1, *b2;
struct xor_block_template *f, *fastest; struct xor_block_template *f, *fastest;
b1 = (void *) md__get_free_pages(GFP_KERNEL, 2); b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
if (! b1) { if (! b1) {
printk("raid5: Yikes! No memory available.\n"); printk("raid5: Yikes! No memory available.\n");
return -ENOMEM; return -ENOMEM;
...@@ -137,7 +135,7 @@ calibrate_xor_block(void) ...@@ -137,7 +135,7 @@ calibrate_xor_block(void)
return 0; return 0;
} }
MD_EXPORT_SYMBOL(xor_block); EXPORT_SYMBOL(xor_block);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
module_init(calibrate_xor_block); module_init(calibrate_xor_block);
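Reviewer note: xor_block() no longer takes an array of buffer_heads; callers now pass an explicit byte count and an array of kernel-virtual addresses. The raid5 call sites are not shown here, so the sketch below is illustrative only; xor_block(), page_address() and PAGE_SIZE are real, the rest is made up. The first buffer acts as the destination and receives the XOR of all the blocks.

/* Sketch: XOR two whole pages with the new interface. */
static void example_xor_two_pages(struct page *dst, struct page *src)
{
	void *ptr[2];

	ptr[0] = page_address(dst);	/* destination, ends up as dst ^ src */
	ptr[1] = page_address(src);
	xor_block(2, PAGE_SIZE, ptr);
}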
...@@ -105,9 +105,8 @@ int presto_set_ext_attr(struct inode *inode, ...@@ -105,9 +105,8 @@ int presto_set_ext_attr(struct inode *inode,
printk("InterMezzo: out of memory!!!\n"); printk("InterMezzo: out of memory!!!\n");
return -ENOMEM; return -ENOMEM;
} }
error = copy_from_user(buf, buffer, buffer_len); if (copy_from_user(buf, buffer, buffer_len))
if (error) return -EFAULT;
return error;
} else } else
buf = buffer; buf = buffer;
} else } else
......
...@@ -31,10 +31,9 @@ int begin_kml_reint (struct file *file, unsigned long arg) ...@@ -31,10 +31,9 @@ int begin_kml_reint (struct file *file, unsigned long arg)
ENTRY; ENTRY;
/* allocate buffer & copy it to kernel space */ /* allocate buffer & copy it to kernel space */
error = copy_from_user(&input, (char *)arg, sizeof(input)); if (copy_from_user(&input, (char *)arg, sizeof(input))) {
if ( error ) {
EXIT; EXIT;
return error; return -EFAULT;
} }
if (input.reclen > kml_fsdata->kml_maxsize) if (input.reclen > kml_fsdata->kml_maxsize)
...@@ -45,11 +44,10 @@ int begin_kml_reint (struct file *file, unsigned long arg) ...@@ -45,11 +44,10 @@ int begin_kml_reint (struct file *file, unsigned long arg)
EXIT; EXIT;
return -ENOMEM; return -ENOMEM;
} }
error = copy_from_user(path, input.volname, input.namelen); if (copy_from_user(path, input.volname, input.namelen)) {
if ( error ) {
PRESTO_FREE(path, input.namelen + 1); PRESTO_FREE(path, input.namelen + 1);
EXIT; EXIT;
return error; return -EFAULT;
} }
path[input.namelen] = '\0'; path[input.namelen] = '\0';
fset = kml_getfset (path); fset = kml_getfset (path);
...@@ -57,10 +55,9 @@ int begin_kml_reint (struct file *file, unsigned long arg) ...@@ -57,10 +55,9 @@ int begin_kml_reint (struct file *file, unsigned long arg)
kml_fsdata = FSET_GET_KMLDATA(fset); kml_fsdata = FSET_GET_KMLDATA(fset);
/* read the buf from user memory here */ /* read the buf from user memory here */
error = copy_from_user(kml_fsdata->kml_buf, input.recbuf, input.reclen); if (copy_from_user(kml_fsdata->kml_buf, input.recbuf, input.reclen)) {
if ( error ) {
EXIT; EXIT;
return error; return -EFAULT;
} }
kml_fsdata->kml_len = input.reclen; kml_fsdata->kml_len = input.reclen;
...@@ -94,21 +91,19 @@ int do_kml_reint (struct file *file, unsigned long arg) ...@@ -94,21 +91,19 @@ int do_kml_reint (struct file *file, unsigned long arg)
struct presto_file_set *fset; struct presto_file_set *fset;
ENTRY; ENTRY;
error = copy_from_user(&input, (char *)arg, sizeof(input)); if (copy_from_user(&input, (char *)arg, sizeof(input))) {
if ( error ) {
EXIT; EXIT;
return error; return -EFAULT;
} }
PRESTO_ALLOC(path, char *, input.namelen + 1); PRESTO_ALLOC(path, char *, input.namelen + 1);
if ( !path ) { if ( !path ) {
EXIT; EXIT;
return -ENOMEM; return -ENOMEM;
} }
error = copy_from_user(path, input.volname, input.namelen); if (copy_from_user(path, input.volname, input.namelen)) {
if ( error ) {
PRESTO_FREE(path, input.namelen + 1); PRESTO_FREE(path, input.namelen + 1);
EXIT; EXIT;
return error; return -EFAULT;
} }
path[input.namelen] = '\0'; path[input.namelen] = '\0';
fset = kml_getfset (path); fset = kml_getfset (path);
...@@ -138,7 +133,8 @@ int do_kml_reint (struct file *file, unsigned long arg) ...@@ -138,7 +133,8 @@ int do_kml_reint (struct file *file, unsigned long arg)
strlen (close->path) + 1, input.pathlen); strlen (close->path) + 1, input.pathlen);
error = -ENOMEM; error = -ENOMEM;
} }
copy_to_user((char *)arg, &input, sizeof (input)); if (copy_to_user((char *)arg, &input, sizeof (input)))
return -EFAULT;
} }
return error; return error;
} }
...@@ -161,10 +157,9 @@ int end_kml_reint (struct file *file, unsigned long arg) ...@@ -161,10 +157,9 @@ int end_kml_reint (struct file *file, unsigned long arg)
char *path; char *path;
ENTRY; ENTRY;
error = copy_from_user(&input, (char *)arg, sizeof(input)); if (copy_from_user(&input, (char *)arg, sizeof(input))) {
if ( error ) {
EXIT; EXIT;
return error; return -EFAULT;
} }
PRESTO_ALLOC(path, char *, input.namelen + 1); PRESTO_ALLOC(path, char *, input.namelen + 1);
...@@ -172,11 +167,11 @@ int end_kml_reint (struct file *file, unsigned long arg) ...@@ -172,11 +167,11 @@ int end_kml_reint (struct file *file, unsigned long arg)
EXIT; EXIT;
return -ENOMEM; return -ENOMEM;
} }
error = copy_from_user(path, input.volname, input.namelen); if (copy_from_user(path, input.volname, input.namelen)) {
if ( error ) { if ( error ) {
PRESTO_FREE(path, input.namelen + 1); PRESTO_FREE(path, input.namelen + 1);
EXIT; EXIT;
return error; return -EFAULT;
} }
path[input.namelen] = '\0'; path[input.namelen] = '\0';
fset = kml_getfset (path); fset = kml_getfset (path);
...@@ -193,7 +188,8 @@ int end_kml_reint (struct file *file, unsigned long arg) ...@@ -193,7 +188,8 @@ int end_kml_reint (struct file *file, unsigned long arg)
#if 0 #if 0
input.newpos = kml_upc->newpos; input.newpos = kml_upc->newpos;
input.count = kml_upc->count; input.count = kml_upc->count;
copy_to_user((char *)arg, &input, sizeof (input)); if (copy_to_user((char *)arg, &input, sizeof (input)))
return -EFAULT;
#endif #endif
return error; return error;
} }
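Reviewer note: all of the InterMezzo changes above follow one pattern. copy_from_user()/copy_to_user() return the number of bytes that could not be copied, not an errno, so the old "return error;" handed a positive byte count back to user space; each call site now maps any nonzero return to -EFAULT. A minimal sketch of the corrected idiom, with a made-up argument struct:

/* Illustrative only; "struct kml_arg" is invented for the example. */
struct kml_arg { int reclen; };

static int example_fetch_arg(struct kml_arg *out, const char *user_ptr)
{
	if (copy_from_user(out, user_ptr, sizeof(*out)))
		return -EFAULT;		/* nonzero == bytes left uncopied */
	return 0;
}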
This diff is collapsed.
...@@ -315,17 +315,19 @@ exp_export(struct nfsctl_export *nxp) ...@@ -315,17 +315,19 @@ exp_export(struct nfsctl_export *nxp)
* 2: We must be able to find an inode from a filehandle. * 2: We must be able to find an inode from a filehandle.
* This means that s_export_op must be set. * This means that s_export_op must be set.
*/ */
if (((inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV) if (!(inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV)) {
|| (nxp->ex_flags & NFSEXP_FSID)) if (!(nxp->ex_flags & NFSEXP_FSID)) {
&& dprintk("exp_export: export of non-dev fs without fsid");
inode->i_sb->s_export_op) goto finish;
/* Ok, we can export it */; }
else { }
if (!inode->i_sb->s_export_op) {
dprintk("exp_export: export of invalid fs type.\n"); dprintk("exp_export: export of invalid fs type.\n");
goto finish; goto finish;
} }
if (inode->i_sb->s_export_op &&
!inode->i_sb->s_export_op->find_exported_dentry) /* Ok, we can export it */;
if (!inode->i_sb->s_export_op->find_exported_dentry)
inode->i_sb->s_export_op->find_exported_dentry = inode->i_sb->s_export_op->find_exported_dentry =
find_exported_dentry; find_exported_dentry;
......
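Reviewer note: the exp_export() rewrite changes the shape of the check, not what is accepted. An export is still allowed iff the filesystem either requires a device or carries an explicit fsid, and the superblock provides s_export_op; the new form merely reports the two failure modes separately. As a restatement for reference only (names as in the hunk above):

/* Equivalent acceptance condition, for reference only: */
int exportable = ((inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV) ||
		  (nxp->ex_flags & NFSEXP_FSID)) &&
		 inode->i_sb->s_export_op != NULL;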
...@@ -414,11 +414,13 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, ...@@ -414,11 +414,13 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
{ {
struct dentry *dentry; struct dentry *dentry;
struct inode *inode; struct inode *inode;
int err; int flags = O_RDONLY|O_LARGEFILE, mode = FMODE_READ, err;
/* If we get here, then the client has already done an "open", and (hopefully) /*
* checked permission - so allow OWNER_OVERRIDE in case a chmod has now revoked * If we get here, then the client has already done an "open",
* permission */ * and (hopefully) checked permission - so allow OWNER_OVERRIDE
* in case a chmod has now revoked permission.
*/
err = fh_verify(rqstp, fhp, type, access | MAY_OWNER_OVERRIDE); err = fh_verify(rqstp, fhp, type, access | MAY_OWNER_OVERRIDE);
if (err) if (err)
goto out; goto out;
...@@ -443,37 +445,24 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, ...@@ -443,37 +445,24 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
if (err) if (err)
goto out_nfserr; goto out_nfserr;
if ((access & MAY_WRITE) && (err = get_write_access(inode)) != 0)
goto out_nfserr;
memset(filp, 0, sizeof(*filp));
filp->f_op = fops_get(inode->i_fop);
atomic_set(&filp->f_count, 1);
filp->f_dentry = dentry;
filp->f_vfsmnt = fhp->fh_export->ex_mnt;
if (access & MAY_WRITE) { if (access & MAY_WRITE) {
filp->f_flags = O_WRONLY|O_LARGEFILE; err = get_write_access(inode);
filp->f_mode = FMODE_WRITE; if (err)
goto out_nfserr;
flags = O_WRONLY|O_LARGEFILE;
mode = FMODE_WRITE;
DQUOT_INIT(inode); DQUOT_INIT(inode);
} else {
filp->f_flags = O_RDONLY|O_LARGEFILE;
filp->f_mode = FMODE_READ;
} }
err = 0; err = init_private_file(filp, dentry, mode);
if (filp->f_op && filp->f_op->open) { if (!err) {
err = filp->f_op->open(inode, filp); filp->f_flags = flags;
if (err) { filp->f_vfsmnt = fhp->fh_export->ex_mnt;
fops_put(filp->f_op); } else if (access & MAY_WRITE)
if (access & MAY_WRITE) put_write_access(inode);
put_write_access(inode);
/* I nearly added put_filp() call here, but this filp
* is really on callers stack frame. -DaveM
*/
atomic_dec(&filp->f_count);
}
}
out_nfserr: out_nfserr:
if (err) if (err)
err = nfserrno(err); err = nfserrno(err);
...@@ -490,9 +479,8 @@ nfsd_close(struct file *filp) ...@@ -490,9 +479,8 @@ nfsd_close(struct file *filp)
struct dentry *dentry = filp->f_dentry; struct dentry *dentry = filp->f_dentry;
struct inode *inode = dentry->d_inode; struct inode *inode = dentry->d_inode;
if (filp->f_op && filp->f_op->release) if (filp->f_op->release)
filp->f_op->release(inode, filp); filp->f_op->release(inode, filp);
fops_put(filp->f_op);
if (filp->f_mode & FMODE_WRITE) if (filp->f_mode & FMODE_WRITE)
put_write_access(inode); put_write_access(inode);
} }
......
...@@ -162,57 +162,107 @@ asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high, ...@@ -162,57 +162,107 @@ asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
} }
#endif #endif
asmlinkage ssize_t sys_read(unsigned int fd, char * buf, size_t count) ssize_t vfs_read(struct file *file, char *buf, size_t count, loff_t *pos)
{ {
struct inode *inode = file->f_dentry->d_inode;
ssize_t ret; ssize_t ret;
struct file * file;
ret = -EBADF; if (!(file->f_mode & FMODE_READ))
file = fget(fd); return -EBADF;
if (file) { if (!file->f_op || !file->f_op->read)
if (file->f_mode & FMODE_READ) { return -EINVAL;
ret = locks_verify_area(FLOCK_VERIFY_READ, file->f_dentry->d_inode, if (pos < 0)
file, file->f_pos, count); return -EINVAL;
if (!ret) {
ssize_t (*read)(struct file *, char *, size_t, loff_t *); ret = locks_verify_area(FLOCK_VERIFY_READ, inode, file, *pos, count);
ret = -EINVAL; if (!ret) {
if (file->f_op && (read = file->f_op->read) != NULL) ret = file->f_op->read(file, buf, count, pos);
ret = read(file, buf, count, &file->f_pos);
}
}
if (ret > 0) if (ret > 0)
dnotify_parent(file->f_dentry, DN_ACCESS); dnotify_parent(file->f_dentry, DN_ACCESS);
}
return ret;
}
ssize_t vfs_write(struct file *file, const char *buf, size_t count, loff_t *pos)
{
struct inode *inode = file->f_dentry->d_inode;
ssize_t ret;
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
if (!file->f_op || !file->f_op->write)
return -EINVAL;
if (pos < 0)
return -EINVAL;
ret = locks_verify_area(FLOCK_VERIFY_WRITE, inode, file, *pos, count);
if (!ret) {
ret = file->f_op->write(file, buf, count, pos);
if (ret > 0)
dnotify_parent(file->f_dentry, DN_MODIFY);
}
return ret;
}
asmlinkage ssize_t sys_read(unsigned int fd, char * buf, size_t count)
{
struct file *file;
ssize_t ret = -EBADF;
file = fget(fd);
if (file) {
ret = vfs_read(file, buf, count, &file->f_pos);
fput(file); fput(file);
} }
return ret; return ret;
} }
asmlinkage ssize_t sys_write(unsigned int fd, const char * buf, size_t count) asmlinkage ssize_t sys_write(unsigned int fd, const char * buf, size_t count)
{ {
ssize_t ret; struct file *file;
struct file * file; ssize_t ret = -EBADF;
ret = -EBADF;
file = fget(fd); file = fget(fd);
if (file) { if (file) {
if (file->f_mode & FMODE_WRITE) { ret = vfs_write(file, buf, count, &file->f_pos);
struct inode *inode = file->f_dentry->d_inode;
ret = locks_verify_area(FLOCK_VERIFY_WRITE, inode, file,
file->f_pos, count);
if (!ret) {
ssize_t (*write)(struct file *, const char *, size_t, loff_t *);
ret = -EINVAL;
if (file->f_op && (write = file->f_op->write) != NULL)
ret = write(file, buf, count, &file->f_pos);
}
}
if (ret > 0)
dnotify_parent(file->f_dentry, DN_MODIFY);
fput(file); fput(file);
} }
return ret; return ret;
} }
asmlinkage ssize_t sys_pread(unsigned int fd, char *buf,
size_t count, loff_t pos)
{
struct file *file;
ssize_t ret = -EBADF;
file = fget(fd);
if (file) {
ret = vfs_read(file, buf, count, &pos);
fput(file);
}
return ret;
}
asmlinkage ssize_t sys_pwrite(unsigned int fd, const char *buf,
size_t count, loff_t pos)
{
struct file *file;
ssize_t ret = -EBADF;
file = fget(fd);
if (file) {
ret = vfs_write(file, buf, count, &pos);
fput(file);
}
return ret;
}
static ssize_t do_readv_writev(int type, struct file *file, static ssize_t do_readv_writev(int type, struct file *file,
const struct iovec * vector, const struct iovec * vector,
...@@ -355,70 +405,3 @@ asmlinkage ssize_t sys_writev(unsigned long fd, const struct iovec * vector, ...@@ -355,70 +405,3 @@ asmlinkage ssize_t sys_writev(unsigned long fd, const struct iovec * vector,
bad_file: bad_file:
return ret; return ret;
} }
/* From the Single Unix Spec: pread & pwrite act like lseek to pos + op +
lseek back to original location. They fail just like lseek does on
non-seekable files. */
asmlinkage ssize_t sys_pread(unsigned int fd, char * buf,
size_t count, loff_t pos)
{
ssize_t ret;
struct file * file;
ssize_t (*read)(struct file *, char *, size_t, loff_t *);
ret = -EBADF;
file = fget(fd);
if (!file)
goto bad_file;
if (!(file->f_mode & FMODE_READ))
goto out;
ret = locks_verify_area(FLOCK_VERIFY_READ, file->f_dentry->d_inode,
file, pos, count);
if (ret)
goto out;
ret = -EINVAL;
if (!file->f_op || !(read = file->f_op->read))
goto out;
if (pos < 0)
goto out;
ret = read(file, buf, count, &pos);
if (ret > 0)
dnotify_parent(file->f_dentry, DN_ACCESS);
out:
fput(file);
bad_file:
return ret;
}
asmlinkage ssize_t sys_pwrite(unsigned int fd, const char * buf,
size_t count, loff_t pos)
{
ssize_t ret;
struct file * file;
ssize_t (*write)(struct file *, const char *, size_t, loff_t *);
ret = -EBADF;
file = fget(fd);
if (!file)
goto bad_file;
if (!(file->f_mode & FMODE_WRITE))
goto out;
ret = locks_verify_area(FLOCK_VERIFY_WRITE, file->f_dentry->d_inode,
file, pos, count);
if (ret)
goto out;
ret = -EINVAL;
if (!file->f_op || !(write = file->f_op->write))
goto out;
if (pos < 0)
goto out;
ret = write(file, buf, count, &pos);
if (ret > 0)
dnotify_parent(file->f_dentry, DN_MODIFY);
out:
fput(file);
bad_file:
return ret;
}
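Reviewer note: with the read and write paths factored into vfs_read()/vfs_write(), the pread/pwrite semantics fall out of the position argument alone: sys_read()/sys_write() pass &file->f_pos, so the file position advances, while sys_pread()/sys_pwrite() pass the address of a local copy, so it does not. A sketch of that distinction from a caller's point of view, using only the new exported helpers; the surrounding function is hypothetical.

/* Hypothetical in-kernel caller of the new helpers.  ubuf is a user-space
 * pointer, as f_op->read expects. */
static ssize_t example_read_at(struct file *file, char *ubuf,
			       size_t count, loff_t where)
{
	loff_t pos = where;		/* local copy: file->f_pos is untouched */

	return vfs_read(file, ubuf, count, &pos);
	/* vfs_read(file, ubuf, count, &file->f_pos) would advance f_pos */
}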
...@@ -1484,13 +1484,19 @@ static int reiserfs_new_symlink (struct reiserfs_transaction_handle *th, ...@@ -1484,13 +1484,19 @@ static int reiserfs_new_symlink (struct reiserfs_transaction_handle *th,
/* inserts the stat data into the tree, and then calls /* inserts the stat data into the tree, and then calls
reiserfs_new_directory (to insert ".", ".." item if new object is reiserfs_new_directory (to insert ".", ".." item if new object is
directory) or reiserfs_new_symlink (to insert symlink body if new directory) or reiserfs_new_symlink (to insert symlink body if new
object is symlink) or nothing (if new object is regular file) */ object is symlink) or nothing (if new object is regular file)
struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th,
struct inode * dir, int mode, NOTE! uid and gid must already be set in the inode. If we return
const char * symname, non-zero due to an error, we have to drop the quota previously allocated
int i_size, /* 0 for regular, EMTRY_DIR_SIZE for dirs, for the fresh inode. This can only be done outside a transaction, so
strlen (symname) for symlinks)*/ if we return non-zero, we also end the transaction. */
struct dentry *dentry, struct inode *inode, int * err) int reiserfs_new_inode (struct reiserfs_transaction_handle *th,
struct inode * dir, int mode,
const char * symname,
/* 0 for regular, EMTRY_DIR_SIZE for dirs,
strlen (symname) for symlinks)*/
loff_t i_size, struct dentry *dentry,
struct inode *inode)
{ {
struct super_block * sb; struct super_block * sb;
INITIALIZE_PATH (path_to_key); INITIALIZE_PATH (path_to_key);
...@@ -1498,72 +1504,40 @@ struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th, ...@@ -1498,72 +1504,40 @@ struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th,
struct item_head ih; struct item_head ih;
struct stat_data sd; struct stat_data sd;
int retval; int retval;
int err;
if (!dir || !dir->i_nlink) { if (!dir || !dir->i_nlink) {
*err = -EPERM; err = -EPERM;
iput(inode) ; goto out_bad_inode;
return NULL;
} }
sb = dir->i_sb; sb = dir->i_sb;
inode->i_flags = 0;//inode->i_sb->s_flags;
/* item head of new item */ /* item head of new item */
ih.ih_key.k_dir_id = INODE_PKEY (dir)->k_objectid; ih.ih_key.k_dir_id = INODE_PKEY (dir)->k_objectid;
ih.ih_key.k_objectid = cpu_to_le32 (reiserfs_get_unused_objectid (th)); ih.ih_key.k_objectid = cpu_to_le32 (reiserfs_get_unused_objectid (th));
if (!ih.ih_key.k_objectid) { if (!ih.ih_key.k_objectid) {
iput(inode) ; err = -ENOMEM;
*err = -ENOMEM; goto out_bad_inode ;
return NULL;
} }
if (old_format_only (sb)) if (old_format_only (sb))
/* not a perfect generation count, as object ids can be reused, but this /* not a perfect generation count, as object ids can be reused, but
** is as good as reiserfs can do right now. ** this is as good as reiserfs can do right now.
** note that the private part of inode isn't filled in yet, we have ** note that the private part of inode isn't filled in yet, we have
** to use the directory. ** to use the directory.
*/ */
inode->i_generation = le32_to_cpu (INODE_PKEY (dir)->k_objectid); inode->i_generation = le32_to_cpu (INODE_PKEY (dir)->k_objectid);
else else
#if defined( USE_INODE_GENERATION_COUNTER ) #if defined( USE_INODE_GENERATION_COUNTER )
inode->i_generation = inode->i_generation = le32_to_cpu(REISERFS_SB(sb)->s_rs->s_inode_generation);
le32_to_cpu( REISERFS_SB(sb) -> s_rs -> s_inode_generation );
#else #else
inode->i_generation = ++event; inode->i_generation = ++event;
#endif #endif
if (old_format_only (sb))
make_le_item_head (&ih, 0, KEY_FORMAT_3_5, SD_OFFSET, TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
else
make_le_item_head (&ih, 0, KEY_FORMAT_3_6, SD_OFFSET, TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
/* key to search for correct place for new stat data */
_make_cpu_key (&key, KEY_FORMAT_3_6, le32_to_cpu (ih.ih_key.k_dir_id),
le32_to_cpu (ih.ih_key.k_objectid), SD_OFFSET, TYPE_STAT_DATA, 3/*key length*/);
/* find proper place for inserting of stat data */
retval = search_item (sb, &key, &path_to_key);
if (retval == IO_ERROR) {
iput (inode);
*err = -EIO;
return NULL;
}
if (retval == ITEM_FOUND) {
pathrelse (&path_to_key);
iput (inode);
*err = -EEXIST;
return NULL;
}
/* fill stat data */ /* fill stat data */
inode->i_mode = mode;
inode->i_nlink = (S_ISDIR (mode) ? 2 : 1); inode->i_nlink = (S_ISDIR (mode) ? 2 : 1);
inode->i_uid = current->fsuid;
if (dir->i_mode & S_ISGID) { /* uid and gid must already be set by the caller for quota init */
inode->i_gid = dir->i_gid;
if (S_ISDIR(mode))
inode->i_mode |= S_ISGID;
} else
inode->i_gid = current->fsgid;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode->i_size = i_size; inode->i_size = i_size;
...@@ -1578,18 +1552,38 @@ struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th, ...@@ -1578,18 +1552,38 @@ struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th,
REISERFS_I(inode)->i_trans_id = 0; REISERFS_I(inode)->i_trans_id = 0;
REISERFS_I(inode)->i_trans_index = 0; REISERFS_I(inode)->i_trans_index = 0;
if (old_format_only (sb))
make_le_item_head (&ih, 0, KEY_FORMAT_3_5, SD_OFFSET, TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
else
make_le_item_head (&ih, 0, KEY_FORMAT_3_6, SD_OFFSET, TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
/* key to search for correct place for new stat data */
_make_cpu_key (&key, KEY_FORMAT_3_6, le32_to_cpu (ih.ih_key.k_dir_id),
le32_to_cpu (ih.ih_key.k_objectid), SD_OFFSET, TYPE_STAT_DATA, 3/*key length*/);
/* find proper place for inserting of stat data */
retval = search_item (sb, &key, &path_to_key);
if (retval == IO_ERROR) {
err = -EIO;
goto out_bad_inode;
}
if (retval == ITEM_FOUND) {
pathrelse (&path_to_key);
err = -EEXIST;
goto out_bad_inode;
}
if (old_format_only (sb)) { if (old_format_only (sb)) {
if (inode->i_uid & ~0xffff || inode->i_gid & ~0xffff) { if (inode->i_uid & ~0xffff || inode->i_gid & ~0xffff) {
pathrelse (&path_to_key); pathrelse (&path_to_key);
/* i_uid or i_gid is too big to be stored in stat data v3.5 */ /* i_uid or i_gid is too big to be stored in stat data v3.5 */
iput (inode); err = -EINVAL;
*err = -EINVAL; goto out_bad_inode;
return NULL;
} }
inode2sd_v1 (&sd, inode); inode2sd_v1 (&sd, inode);
} else } else {
inode2sd (&sd, inode); inode2sd (&sd, inode);
}
// these do not go to on-disk stat data // these do not go to on-disk stat data
inode->i_ino = le32_to_cpu (ih.ih_key.k_objectid); inode->i_ino = le32_to_cpu (ih.ih_key.k_objectid);
inode->i_blksize = PAGE_SIZE; inode->i_blksize = PAGE_SIZE;
...@@ -1610,10 +1604,9 @@ struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th, ...@@ -1610,10 +1604,9 @@ struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th,
/* insert the stat data into the tree */ /* insert the stat data into the tree */
retval = reiserfs_insert_item (th, &path_to_key, &key, &ih, (char *)(&sd)); retval = reiserfs_insert_item (th, &path_to_key, &key, &ih, (char *)(&sd));
if (retval) { if (retval) {
iput (inode); err = retval;
*err = retval;
reiserfs_check_path(&path_to_key) ; reiserfs_check_path(&path_to_key) ;
return NULL; goto out_bad_inode;
} }
if (S_ISDIR(mode)) { if (S_ISDIR(mode)) {
...@@ -1628,19 +1621,35 @@ struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th, ...@@ -1628,19 +1621,35 @@ struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th,
retval = reiserfs_new_symlink (th, &ih, &path_to_key, symname, i_size); retval = reiserfs_new_symlink (th, &ih, &path_to_key, symname, i_size);
} }
if (retval) { if (retval) {
inode->i_nlink = 0; err = retval;
iput (inode);
*err = retval;
reiserfs_check_path(&path_to_key) ; reiserfs_check_path(&path_to_key) ;
return NULL; journal_end(th, th->t_super, th->t_blocks_allocated);
goto out_inserted_sd;
} }
insert_inode_hash (inode); insert_inode_hash (inode);
// we do not mark inode dirty: on disk content matches to the reiserfs_update_sd(th, inode);
// in-core one
reiserfs_check_path(&path_to_key) ; reiserfs_check_path(&path_to_key) ;
return inode; return 0;
/* it looks like you can easily compress these two goto targets into
* one. Keeping it like this doesn't actually hurt anything, and they
* are place holders for what the quota code actually needs.
*/
out_bad_inode:
/* Invalidate the object, nothing was inserted yet */
INODE_PKEY(inode)->k_objectid = 0;
/* dquot_drop must be done outside a transaction */
journal_end(th, th->t_super, th->t_blocks_allocated) ;
make_bad_inode(inode);
out_inserted_sd:
inode->i_nlink = 0;
th->t_trans_id = 0; /* so the caller can't use this handle later */
iput(inode);
return err;
} }
/* /*
......
...@@ -204,7 +204,7 @@ static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block, ...@@ -204,7 +204,7 @@ static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
if (!jb->bitmaps[bmap_nr]) { if (!jb->bitmaps[bmap_nr]) {
jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb) ; jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb) ;
} }
set_bit(bit_nr, jb->bitmaps[bmap_nr]->data) ; set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data) ;
return 0 ; return 0 ;
} }
...@@ -550,7 +550,7 @@ int reiserfs_in_journal(struct super_block *p_s_sb, ...@@ -550,7 +550,7 @@ int reiserfs_in_journal(struct super_block *p_s_sb,
PROC_INFO_INC( p_s_sb, journal.in_journal_bitmap ); PROC_INFO_INC( p_s_sb, journal.in_journal_bitmap );
jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ; jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ;
if (jb->journal_list && jb->bitmaps[bmap_nr] && if (jb->journal_list && jb->bitmaps[bmap_nr] &&
test_bit(bit_nr, jb->bitmaps[bmap_nr]->data)) { test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data)) {
tmp_bit = find_next_zero_bit((unsigned long *) tmp_bit = find_next_zero_bit((unsigned long *)
(jb->bitmaps[bmap_nr]->data), (jb->bitmaps[bmap_nr]->data),
p_s_sb->s_blocksize << 3, bit_nr+1) ; p_s_sb->s_blocksize << 3, bit_nr+1) ;
......
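Reviewer note: the journal and namei hunks add (unsigned long *) casts because the generic bitops (set_bit(), test_bit(), find_first_zero_bit(), find_next_zero_bit()) operate on arrays of unsigned long, while the reiserfs bitmap buffers are declared through char pointers. The casts keep the existing storage; a bitmap written from scratch would more naturally be declared with the right type, e.g. (sketch only, not from this commit):

/* Sketch: a word-aligned bitmap the generic bitops can use without casts. */
#define EXAMPLE_BITS 1024
static unsigned long example_bitmap[EXAMPLE_BITS / BITS_PER_LONG];

static int example_alloc_bit(void)
{
	int nr = find_first_zero_bit(example_bitmap, EXAMPLE_BITS);

	if (nr >= EXAMPLE_BITS)
		return -1;		/* bitmap full */
	set_bit(nr, example_bitmap);
	return nr;
}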
...@@ -248,7 +248,7 @@ static int linear_search_in_dir_item (struct cpu_key * key, struct reiserfs_dir_ ...@@ -248,7 +248,7 @@ static int linear_search_in_dir_item (struct cpu_key * key, struct reiserfs_dir_
/* mark, that this generation number is used */ /* mark, that this generation number is used */
if (de->de_gen_number_bit_string) if (de->de_gen_number_bit_string)
set_bit (GET_GENERATION_NUMBER (deh_offset (deh)), de->de_gen_number_bit_string); set_bit (GET_GENERATION_NUMBER (deh_offset (deh)), (unsigned long *)de->de_gen_number_bit_string);
// calculate pointer to name and namelen // calculate pointer to name and namelen
de->de_entry_num = i; de->de_entry_num = i;
...@@ -504,7 +504,7 @@ static int reiserfs_add_entry (struct reiserfs_transaction_handle *th, struct in ...@@ -504,7 +504,7 @@ static int reiserfs_add_entry (struct reiserfs_transaction_handle *th, struct in
return -EEXIST; return -EEXIST;
} }
gen_number = find_first_zero_bit (bit_string, MAX_GENERATION_NUMBER + 1); gen_number = find_first_zero_bit ((unsigned long *)bit_string, MAX_GENERATION_NUMBER + 1);
if (gen_number > MAX_GENERATION_NUMBER) { if (gen_number > MAX_GENERATION_NUMBER) {
/* there is no free generation number */ /* there is no free generation number */
reiserfs_warning ("reiserfs_add_entry: Congratulations! we have got hash function screwed up\n"); reiserfs_warning ("reiserfs_add_entry: Congratulations! we have got hash function screwed up\n");
...@@ -552,6 +552,40 @@ static int reiserfs_add_entry (struct reiserfs_transaction_handle *th, struct in ...@@ -552,6 +552,40 @@ static int reiserfs_add_entry (struct reiserfs_transaction_handle *th, struct in
return 0; return 0;
} }
/* quota utility function, call if you've had to abort after calling
** new_inode_init, and have not called reiserfs_new_inode yet.
** This should only be called on inodes that do not have stat data
** inserted into the tree yet.
*/
static int drop_new_inode(struct inode *inode) {
make_bad_inode(inode) ;
iput(inode) ;
return 0 ;
}
/* utility function that does setup for reiserfs_new_inode.
** DQUOT_ALLOC_INODE cannot be called inside a transaction, so we had
** to pull some bits of reiserfs_new_inode out into this func.
** Yes, the actual quota calls are missing, they are part of the quota
** patch.
*/
static int new_inode_init(struct inode *inode, struct inode *dir, int mode) {
/* the quota init calls have to know who to charge the quota to, so
** we have to set uid and gid here
*/
inode->i_uid = current->fsuid;
inode->i_mode = mode;
if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
if (S_ISDIR(mode))
inode->i_mode |= S_ISGID;
} else {
inode->i_gid = current->fsgid;
}
return 0 ;
}
// //
// a portion of this function, particularly the VFS interface portion, // a portion of this function, particularly the VFS interface portion,
...@@ -564,7 +598,6 @@ static int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode) ...@@ -564,7 +598,6 @@ static int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode)
{ {
int retval; int retval;
struct inode * inode; struct inode * inode;
int windex ;
int jbegin_count = JOURNAL_PER_BALANCE_CNT * 2 ; int jbegin_count = JOURNAL_PER_BALANCE_CNT * 2 ;
struct reiserfs_transaction_handle th ; struct reiserfs_transaction_handle th ;
...@@ -572,16 +605,16 @@ static int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode) ...@@ -572,16 +605,16 @@ static int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode)
if (!inode) { if (!inode) {
return -ENOMEM ; return -ENOMEM ;
} }
retval = new_inode_init(inode, dir, mode);
if (retval)
return retval;
lock_kernel(); lock_kernel();
journal_begin(&th, dir->i_sb, jbegin_count) ; journal_begin(&th, dir->i_sb, jbegin_count) ;
th.t_caller = "create" ; th.t_caller = "create" ;
windex = push_journal_writer("reiserfs_create") ; retval = reiserfs_new_inode (&th, dir, mode, 0, 0/*i_size*/, dentry, inode);
inode = reiserfs_new_inode (&th, dir, mode, 0, 0/*i_size*/, dentry, inode, &retval); if (retval) {
if (!inode) { goto out_failed;
pop_journal_writer(windex) ;
journal_end(&th, dir->i_sb, jbegin_count) ;
unlock_kernel();
return retval;
} }
inode->i_op = &reiserfs_file_inode_operations; inode->i_op = &reiserfs_file_inode_operations;
...@@ -593,22 +626,19 @@ static int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode) ...@@ -593,22 +626,19 @@ static int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode)
if (retval) { if (retval) {
inode->i_nlink--; inode->i_nlink--;
reiserfs_update_sd (&th, inode); reiserfs_update_sd (&th, inode);
pop_journal_writer(windex) ;
// FIXME: should we put iput here and have stat data deleted
// in the same transactioin
journal_end(&th, dir->i_sb, jbegin_count) ; journal_end(&th, dir->i_sb, jbegin_count) ;
iput (inode); iput (inode);
unlock_kernel(); goto out_failed;
return retval;
} }
reiserfs_update_inode_transaction(inode) ; reiserfs_update_inode_transaction(inode) ;
reiserfs_update_inode_transaction(dir) ; reiserfs_update_inode_transaction(dir) ;
d_instantiate(dentry, inode); d_instantiate(dentry, inode);
pop_journal_writer(windex) ;
journal_end(&th, dir->i_sb, jbegin_count) ; journal_end(&th, dir->i_sb, jbegin_count) ;
out_failed:
unlock_kernel(); unlock_kernel();
return 0; return retval;
} }
...@@ -623,7 +653,6 @@ static int reiserfs_mknod (struct inode * dir, struct dentry *dentry, int mode, ...@@ -623,7 +653,6 @@ static int reiserfs_mknod (struct inode * dir, struct dentry *dentry, int mode,
{ {
int retval; int retval;
struct inode * inode; struct inode * inode;
int windex ;
struct reiserfs_transaction_handle th ; struct reiserfs_transaction_handle th ;
int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3; int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
...@@ -631,16 +660,16 @@ static int reiserfs_mknod (struct inode * dir, struct dentry *dentry, int mode, ...@@ -631,16 +660,16 @@ static int reiserfs_mknod (struct inode * dir, struct dentry *dentry, int mode,
if (!inode) { if (!inode) {
return -ENOMEM ; return -ENOMEM ;
} }
retval = new_inode_init(inode, dir, mode);
if (retval)
return retval;
lock_kernel(); lock_kernel();
journal_begin(&th, dir->i_sb, jbegin_count) ; journal_begin(&th, dir->i_sb, jbegin_count) ;
windex = push_journal_writer("reiserfs_mknod") ;
inode = reiserfs_new_inode (&th, dir, mode, 0, 0/*i_size*/, dentry, inode, &retval); retval = reiserfs_new_inode (&th, dir, mode, 0, 0/*i_size*/, dentry, inode);
if (!inode) { if (retval) {
pop_journal_writer(windex) ; goto out_failed;
journal_end(&th, dir->i_sb, jbegin_count) ;
unlock_kernel();
return retval;
} }
init_special_inode(inode, mode, rdev) ; init_special_inode(inode, mode, rdev) ;
...@@ -656,18 +685,17 @@ static int reiserfs_mknod (struct inode * dir, struct dentry *dentry, int mode, ...@@ -656,18 +685,17 @@ static int reiserfs_mknod (struct inode * dir, struct dentry *dentry, int mode,
if (retval) { if (retval) {
inode->i_nlink--; inode->i_nlink--;
reiserfs_update_sd (&th, inode); reiserfs_update_sd (&th, inode);
pop_journal_writer(windex) ;
journal_end(&th, dir->i_sb, jbegin_count) ; journal_end(&th, dir->i_sb, jbegin_count) ;
iput (inode); iput (inode);
unlock_kernel(); goto out_failed;
return retval;
} }
d_instantiate(dentry, inode); d_instantiate(dentry, inode);
pop_journal_writer(windex) ;
journal_end(&th, dir->i_sb, jbegin_count) ; journal_end(&th, dir->i_sb, jbegin_count) ;
out_failed:
unlock_kernel(); unlock_kernel();
return 0; return retval;
} }
...@@ -682,33 +710,33 @@ static int reiserfs_mkdir (struct inode * dir, struct dentry *dentry, int mode) ...@@ -682,33 +710,33 @@ static int reiserfs_mkdir (struct inode * dir, struct dentry *dentry, int mode)
{ {
int retval; int retval;
struct inode * inode; struct inode * inode;
int windex ;
struct reiserfs_transaction_handle th ; struct reiserfs_transaction_handle th ;
int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3; int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
mode = S_IFDIR | mode;
inode = new_inode(dir->i_sb) ; inode = new_inode(dir->i_sb) ;
if (!inode) { if (!inode) {
return -ENOMEM ; return -ENOMEM ;
} }
retval = new_inode_init(inode, dir, mode);
if (retval)
return retval;
lock_kernel(); lock_kernel();
journal_begin(&th, dir->i_sb, jbegin_count) ; journal_begin(&th, dir->i_sb, jbegin_count) ;
windex = push_journal_writer("reiserfs_mkdir") ;
/* inc the link count now, so another writer doesn't overflow it while /* inc the link count now, so another writer doesn't overflow it while
** we sleep later on. ** we sleep later on.
*/ */
INC_DIR_INODE_NLINK(dir) INC_DIR_INODE_NLINK(dir)
mode = S_IFDIR | mode; retval = reiserfs_new_inode (&th, dir, mode, 0/*symlink*/,
inode = reiserfs_new_inode (&th, dir, mode, 0/*symlink*/, old_format_only (dir->i_sb) ?
old_format_only (dir->i_sb) ? EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
dentry, inode, &retval); dentry, inode);
if (!inode) { if (retval) {
pop_journal_writer(windex) ;
dir->i_nlink-- ; dir->i_nlink-- ;
journal_end(&th, dir->i_sb, jbegin_count) ; goto out_failed;
unlock_kernel();
return retval;
} }
reiserfs_update_inode_transaction(inode) ; reiserfs_update_inode_transaction(inode) ;
reiserfs_update_inode_transaction(dir) ; reiserfs_update_inode_transaction(dir) ;
...@@ -723,21 +751,19 @@ static int reiserfs_mkdir (struct inode * dir, struct dentry *dentry, int mode) ...@@ -723,21 +751,19 @@ static int reiserfs_mkdir (struct inode * dir, struct dentry *dentry, int mode)
inode->i_nlink = 0; inode->i_nlink = 0;
DEC_DIR_INODE_NLINK(dir); DEC_DIR_INODE_NLINK(dir);
reiserfs_update_sd (&th, inode); reiserfs_update_sd (&th, inode);
pop_journal_writer(windex) ;
journal_end(&th, dir->i_sb, jbegin_count) ; journal_end(&th, dir->i_sb, jbegin_count) ;
iput (inode); iput (inode);
unlock_kernel(); goto out_failed;
return retval;
} }
// the above add_entry did not update dir's stat data // the above add_entry did not update dir's stat data
reiserfs_update_sd (&th, dir); reiserfs_update_sd (&th, dir);
d_instantiate(dentry, inode); d_instantiate(dentry, inode);
pop_journal_writer(windex) ;
journal_end(&th, dir->i_sb, jbegin_count) ; journal_end(&th, dir->i_sb, jbegin_count) ;
out_failed:
unlock_kernel(); unlock_kernel();
return 0; return retval;
} }
static inline int reiserfs_empty_dir(struct inode *inode) { static inline int reiserfs_empty_dir(struct inode *inode) {
...@@ -942,43 +968,43 @@ static int reiserfs_symlink (struct inode * dir, struct dentry * dentry, const c ...@@ -942,43 +968,43 @@ static int reiserfs_symlink (struct inode * dir, struct dentry * dentry, const c
struct inode * inode; struct inode * inode;
char * name; char * name;
int item_len; int item_len;
int windex ;
struct reiserfs_transaction_handle th ; struct reiserfs_transaction_handle th ;
int mode = S_IFLNK | S_IRWXUGO;
int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3; int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
inode = new_inode(dir->i_sb) ; inode = new_inode(dir->i_sb) ;
if (!inode) { if (!inode) {
return -ENOMEM ; return -ENOMEM ;
} }
retval = new_inode_init(inode, dir, mode);
if (retval) {
return retval;
}
lock_kernel();
item_len = ROUND_UP (strlen (symname)); item_len = ROUND_UP (strlen (symname));
if (item_len > MAX_DIRECT_ITEM_LEN (dir->i_sb->s_blocksize)) { if (item_len > MAX_DIRECT_ITEM_LEN (dir->i_sb->s_blocksize)) {
iput(inode) ; retval = -ENAMETOOLONG;
return -ENAMETOOLONG; drop_new_inode(inode);
goto out_failed;
} }
lock_kernel();
name = reiserfs_kmalloc (item_len, GFP_NOFS, dir->i_sb); name = reiserfs_kmalloc (item_len, GFP_NOFS, dir->i_sb);
if (!name) { if (!name) {
iput(inode) ; drop_new_inode(inode);
unlock_kernel(); retval = -ENOMEM;
return -ENOMEM; goto out_failed;
} }
memcpy (name, symname, strlen (symname)); memcpy (name, symname, strlen (symname));
padd_item (name, item_len, strlen (symname)); padd_item (name, item_len, strlen (symname));
journal_begin(&th, dir->i_sb, jbegin_count) ; journal_begin(&th, dir->i_sb, jbegin_count) ;
windex = push_journal_writer("reiserfs_symlink") ;
inode = reiserfs_new_inode (&th, dir, S_IFLNK | S_IRWXUGO, name, strlen (symname), dentry, retval = reiserfs_new_inode (&th, dir, mode, name, strlen (symname),
inode, &retval); dentry, inode);
reiserfs_kfree (name, item_len, dir->i_sb); reiserfs_kfree (name, item_len, dir->i_sb);
if (inode == 0) { /* reiserfs_new_inode iputs for us */ if (retval) { /* reiserfs_new_inode iputs for us */
pop_journal_writer(windex) ; goto out_failed;
journal_end(&th, dir->i_sb, jbegin_count) ;
unlock_kernel();
return retval;
} }
reiserfs_update_inode_transaction(inode) ; reiserfs_update_inode_transaction(inode) ;
...@@ -996,18 +1022,16 @@ static int reiserfs_symlink (struct inode * dir, struct dentry * dentry, const c ...@@ -996,18 +1022,16 @@ static int reiserfs_symlink (struct inode * dir, struct dentry * dentry, const c
if (retval) { if (retval) {
inode->i_nlink--; inode->i_nlink--;
reiserfs_update_sd (&th, inode); reiserfs_update_sd (&th, inode);
pop_journal_writer(windex) ;
journal_end(&th, dir->i_sb, jbegin_count) ; journal_end(&th, dir->i_sb, jbegin_count) ;
iput (inode); iput (inode);
unlock_kernel(); goto out_failed;
return retval;
} }
d_instantiate(dentry, inode); d_instantiate(dentry, inode);
pop_journal_writer(windex) ;
journal_end(&th, dir->i_sb, jbegin_count) ; journal_end(&th, dir->i_sb, jbegin_count) ;
out_failed:
unlock_kernel(); unlock_kernel();
return 0; return retval;
} }
......
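Reviewer note: taken together, the namei.c changes give reiserfs_create(), reiserfs_mknod(), reiserfs_mkdir() and reiserfs_symlink() the same skeleton: new_inode() plus new_inode_init() set up uid/gid (and, with the quota patch, quota) before any transaction, reiserfs_new_inode() now returns an error code instead of the inode, and every failure funnels through a single out_failed label that ends with unlock_kernel(). Condensed from the hunks above, with the directory-entry error handling omitted:

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	retval = new_inode_init(inode, dir, mode);
	if (retval)
		return retval;

	lock_kernel();
	journal_begin(&th, dir->i_sb, jbegin_count);
	retval = reiserfs_new_inode(&th, dir, mode, 0, 0 /*i_size*/, dentry, inode);
	if (retval)
		goto out_failed;	/* reiserfs_new_inode already iput()s */

	/* ... hook up i_op/i_fop, add the directory entry ... */

	d_instantiate(dentry, inode);
	journal_end(&th, dir->i_sb, jbegin_count);
out_failed:
	unlock_kernel();
	return retval;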
...@@ -839,6 +839,8 @@ static struct xor_block_template xor_block_pIII_sse = { ...@@ -839,6 +839,8 @@ static struct xor_block_template xor_block_pIII_sse = {
/* Also try the generic routines. */ /* Also try the generic routines. */
#include <asm-generic/xor.h> #include <asm-generic/xor.h>
#define cpu_has_mmx (test_bit(X86_FEATURE_MMX, boot_cpu_data.x86_capability))
#undef XOR_TRY_TEMPLATES #undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES \ #define XOR_TRY_TEMPLATES \
do { \ do { \
...@@ -846,7 +848,7 @@ static struct xor_block_template xor_block_pIII_sse = { ...@@ -846,7 +848,7 @@ static struct xor_block_template xor_block_pIII_sse = {
xor_speed(&xor_block_32regs); \ xor_speed(&xor_block_32regs); \
if (cpu_has_xmm) \ if (cpu_has_xmm) \
xor_speed(&xor_block_pIII_sse); \ xor_speed(&xor_block_pIII_sse); \
if (md_cpu_has_mmx()) { \ if (cpu_has_mmx) { \
xor_speed(&xor_block_pII_mmx); \ xor_speed(&xor_block_pII_mmx); \
xor_speed(&xor_block_p5_mmx); \ xor_speed(&xor_block_p5_mmx); \
} \ } \
......
...@@ -759,6 +759,9 @@ struct inode_operations { ...@@ -759,6 +759,9 @@ struct inode_operations {
struct seq_file; struct seq_file;
extern ssize_t vfs_read(struct file *, char *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char *, size_t, loff_t *);
/* /*
* NOTE: write_inode, delete_inode, clear_inode, put_inode can be called * NOTE: write_inode, delete_inode, clear_inode, put_inode can be called
* without the big kernel lock held in all filesystems. * without the big kernel lock held in all filesystems.
......
...@@ -169,8 +169,9 @@ struct mdk_rdev_s ...@@ -169,8 +169,9 @@ struct mdk_rdev_s
struct block_device *bdev; /* block device handle */ struct block_device *bdev; /* block device handle */
mdp_super_t *sb; struct page *sb_page;
unsigned long sb_offset; mdp_super_t *sb;
unsigned long sb_offset;
int alias_device; /* device alias to the same disk */ int alias_device; /* device alias to the same disk */
int faulty; /* if faulty do not issue IO requests */ int faulty; /* if faulty do not issue IO requests */
......
...@@ -7,21 +7,21 @@ ...@@ -7,21 +7,21 @@
/* /*
* *
* Each stripe contains one buffer per disc. Each buffer can be in * Each stripe contains one buffer per disc. Each buffer can be in
* one of a number of states determined by bh_state. Changes between * one of a number of states stored in "flags". Changes between
* these states happen *almost* exclusively under a per-stripe * these states happen *almost* exclusively under a per-stripe
* spinlock. Some very specific changes can happen in b_end_io, and * spinlock. Some very specific changes can happen in bi_end_io, and
* these are not protected by the spin lock. * these are not protected by the spin lock.
* *
* The bh_state bits that are used to represent these states are: * The flag bits that are used to represent these states are:
* BH_Uptodate, BH_Lock * R5_UPTODATE and R5_LOCKED
* *
* State Empty == !Uptodate, !Lock * State Empty == !UPTODATE, !LOCK
* We have no data, and there is no active request * We have no data, and there is no active request
* State Want == !Uptodate, Lock * State Want == !UPTODATE, LOCK
* A read request is being submitted for this block * A read request is being submitted for this block
* State Dirty == Uptodate, Lock * State Dirty == UPTODATE, LOCK
* Some new data is in this buffer, and it is being written out * Some new data is in this buffer, and it is being written out
* State Clean == Uptodate, !Lock * State Clean == UPTODATE, !LOCK
* We have valid data which is the same as on disc * We have valid data which is the same as on disc
* *
* The possible state transitions are: * The possible state transitions are:
...@@ -124,24 +124,29 @@ ...@@ -124,24 +124,29 @@
* plus raid5d if it is handling it, plus one for each active request * plus raid5d if it is handling it, plus one for each active request
* on a cached buffer. * on a cached buffer.
*/ */
struct stripe_head { struct stripe_head {
struct stripe_head *hash_next, **hash_pprev; /* hash pointers */ struct stripe_head *hash_next, **hash_pprev; /* hash pointers */
struct list_head lru; /* inactive_list or handle_list */ struct list_head lru; /* inactive_list or handle_list */
struct raid5_private_data *raid_conf; struct raid5_private_data *raid_conf;
struct buffer_head *bh_cache[MD_SB_DISKS]; /* buffered copy */ sector_t sector; /* sector of this row */
struct buffer_head *bh_read[MD_SB_DISKS]; /* read request buffers of the MD device */
struct buffer_head *bh_write[MD_SB_DISKS]; /* write request buffers of the MD device */
struct buffer_head *bh_written[MD_SB_DISKS]; /* write request buffers of the MD device that have been scheduled for write */
struct page *bh_page[MD_SB_DISKS]; /* saved bh_cache[n]->b_page when reading around the cache */
unsigned long sector; /* sector of this row */
int size; /* buffers size */
int pd_idx; /* parity disk index */ int pd_idx; /* parity disk index */
unsigned long state; /* state flags */ unsigned long state; /* state flags */
atomic_t count; /* nr of active thread/requests */ atomic_t count; /* nr of active thread/requests */
spinlock_t lock; spinlock_t lock;
int sync_redone; struct r5dev {
struct bio req;
struct bio_vec vec;
struct page *page;
struct bio *toread, *towrite, *written;
sector_t sector; /* sector of this page */
unsigned long flags;
} dev[1]; /* allocated with extra space depending on RAID geometry */
}; };
/* Flags */
#define R5_UPTODATE 0 /* page contains current data */
#define R5_LOCKED 1 /* IO has been submitted on "req" */
#define R5_OVERWRITE 2 /* towrite covers whole page */
/* /*
* Write method * Write method
...@@ -187,6 +192,7 @@ struct stripe_head { ...@@ -187,6 +192,7 @@ struct stripe_head {
struct disk_info { struct disk_info {
kdev_t dev; kdev_t dev;
struct block_device *bdev;
int operational; int operational;
int number; int number;
int raid_disk; int raid_disk;
...@@ -201,7 +207,6 @@ struct raid5_private_data { ...@@ -201,7 +207,6 @@ struct raid5_private_data {
mdk_thread_t *thread, *resync_thread; mdk_thread_t *thread, *resync_thread;
struct disk_info disks[MD_SB_DISKS]; struct disk_info disks[MD_SB_DISKS];
struct disk_info *spare; struct disk_info *spare;
int buffer_size;
int chunk_size, level, algorithm; int chunk_size, level, algorithm;
int raid_disks, working_disks, failed_disks; int raid_disks, working_disks, failed_disks;
int resync_parity; int resync_parity;
...@@ -210,16 +215,19 @@ struct raid5_private_data { ...@@ -210,16 +215,19 @@ struct raid5_private_data {
struct list_head handle_list; /* stripes needing handling */ struct list_head handle_list; /* stripes needing handling */
struct list_head delayed_list; /* stripes that have plugged requests */ struct list_head delayed_list; /* stripes that have plugged requests */
atomic_t preread_active_stripes; /* stripes with scheduled io */ atomic_t preread_active_stripes; /* stripes with scheduled io */
char cache_name[20];
kmem_cache_t *slab_cache; /* for allocating stripes */
/* /*
* Free stripes pool * Free stripes pool
*/ */
atomic_t active_stripes; atomic_t active_stripes;
struct list_head inactive_list; struct list_head inactive_list;
md_wait_queue_head_t wait_for_stripe; wait_queue_head_t wait_for_stripe;
int inactive_blocked; /* release of inactive stripes blocked, int inactive_blocked; /* release of inactive stripes blocked,
* waiting for 25% to be free * waiting for 25% to be free
*/ */
md_spinlock_t device_lock; spinlock_t device_lock;
int plugged; int plugged;
struct tq_struct plug_tq; struct tq_struct plug_tq;
......
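Reviewer note: struct stripe_head drops the per-disk buffer_head arrays in favour of an embedded struct r5dev per device, declared as dev[1] and over-allocated; the new cache_name/slab_cache fields in raid5_private_data suggest each array gets its own kmem_cache sized for its actual number of disks. A sketch of the sizing arithmetic; the helper name and flags are illustrative, not from this commit.

/* Sketch: one stripe_head carries raid_disks r5dev slots, the first of them
 * being the dev[1] member itself. */
static struct stripe_head *example_alloc_stripe(kmem_cache_t *sc, int raid_disks)
{
	size_t size = sizeof(struct stripe_head) +
		      (raid_disks - 1) * sizeof(struct r5dev);
	struct stripe_head *sh = kmem_cache_alloc(sc, GFP_KERNEL);

	/* the cache itself must have been created with this object size, e.g.
	 * kmem_cache_create(conf->cache_name, size, 0, 0, NULL, NULL); */
	if (sh)
		memset(sh, 0, size);
	return sh;
}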
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#define MAX_XOR_BLOCKS 5 #define MAX_XOR_BLOCKS 5
extern void xor_block(unsigned int count, struct buffer_head **bh_ptr); extern void xor_block(unsigned int count, unsigned int bytes, void **ptr);
struct xor_block_template { struct xor_block_template {
struct xor_block_template *next; struct xor_block_template *next;
......
...@@ -1841,10 +1841,10 @@ struct inode * reiserfs_iget (struct super_block * s, ...@@ -1841,10 +1841,10 @@ struct inode * reiserfs_iget (struct super_block * s,
const struct cpu_key * key); const struct cpu_key * key);
struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th, int reiserfs_new_inode (struct reiserfs_transaction_handle *th,
struct inode * dir, int mode, struct inode * dir, int mode,
const char * symname, int item_len, const char * symname, loff_t i_size,
struct dentry *dentry, struct inode *inode, int * err); struct dentry *dentry, struct inode *inode);
int reiserfs_sync_inode (struct reiserfs_transaction_handle *th, struct inode * inode); int reiserfs_sync_inode (struct reiserfs_transaction_handle *th, struct inode * inode);
void reiserfs_update_sd (struct reiserfs_transaction_handle *th, struct inode * inode); void reiserfs_update_sd (struct reiserfs_transaction_handle *th, struct inode * inode);
......
...@@ -231,7 +231,7 @@ void daemonize(void) ...@@ -231,7 +231,7 @@ void daemonize(void)
/* /*
* When we die, we re-parent all our children. * When we die, we re-parent all our children.
* Try to give them to another thread in our process * Try to give them to another thread in our thread
* group, and if no such member exists, give it to * group, and if no such member exists, give it to
* the global child reaper process (ie "init") * the global child reaper process (ie "init")
*/ */
...@@ -241,8 +241,14 @@ static inline void forget_original_parent(struct task_struct * father) ...@@ -241,8 +241,14 @@ static inline void forget_original_parent(struct task_struct * father)
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
/* Next in our thread group */ /* Next in our thread group, if they're not already exiting */
reaper = next_thread(father); reaper = father;
do {
reaper = next_thread(reaper);
if (!(reaper->flags & PF_EXITING))
break;
} while (reaper != father);
if (reaper == father) if (reaper == father)
reaper = child_reaper; reaper = child_reaper;
......
...@@ -243,6 +243,8 @@ EXPORT_SYMBOL(shrink_dcache_anon); ...@@ -243,6 +243,8 @@ EXPORT_SYMBOL(shrink_dcache_anon);
EXPORT_SYMBOL(find_inode_number); EXPORT_SYMBOL(find_inode_number);
EXPORT_SYMBOL(is_subdir); EXPORT_SYMBOL(is_subdir);
EXPORT_SYMBOL(get_unused_fd); EXPORT_SYMBOL(get_unused_fd);
EXPORT_SYMBOL(vfs_read);
EXPORT_SYMBOL(vfs_write);
EXPORT_SYMBOL(vfs_create); EXPORT_SYMBOL(vfs_create);
EXPORT_SYMBOL(vfs_mkdir); EXPORT_SYMBOL(vfs_mkdir);
EXPORT_SYMBOL(vfs_mknod); EXPORT_SYMBOL(vfs_mknod);
......
...@@ -949,12 +949,10 @@ svc_sock_update_bufs(struct svc_serv *serv) ...@@ -949,12 +949,10 @@ svc_sock_update_bufs(struct svc_serv *serv)
if (sock->type == SOCK_DGRAM) { if (sock->type == SOCK_DGRAM) {
/* udp sockets need large rcvbuf as all pending /* udp sockets need large rcvbuf as all pending
* requests are still in that buffer. * requests are still in that buffer.
* As outgoing requests do not wait for an
* ACK, only a moderate sndbuf is needed
*/ */
svc_sock_setbufsize(sock, svc_sock_setbufsize(sock,
5 * serv->sv_bufsz, (serv->sv_nrthreads+3) * serv->sv_bufsz,
(serv->sv_nrthreads+2)* serv->sv_bufsz); (serv->sv_nrthreads+3) * serv->sv_bufsz);
} else if (svsk->sk_sk->state != TCP_LISTEN) { } else if (svsk->sk_sk->state != TCP_LISTEN) {
printk(KERN_ERR "RPC update_bufs: permanent sock neither UDP or TCP_LISTEN\n"); printk(KERN_ERR "RPC update_bufs: permanent sock neither UDP or TCP_LISTEN\n");
} }
......