Commit 750b8c7c authored by David S. Miller

Merge nuts.ninka.net:/home/davem/src/BK/network-2.5

into nuts.ninka.net:/home/davem/src/BK/net-2.5
parents cbe7defa 8a36b98f
@@ -67,15 +67,22 @@ If the user applications are going to request hugepages using mmap system
 call, then it is required that system administrator mount a file system of
 type hugetlbfs:
 
-  mount none /mnt/huge -t hugetlbfs
+  mount none /mnt/huge -t hugetlbfs <uid=value> <gid=value> <mode=value>
 
 This command mounts a (pseudo) filesystem of type hugetlbfs on the directory
-/mnt/huge.  Any files created on /mnt/huge uses hugepages.  An example is
-given at the end of this document.
+/mnt/huge.  Any files created on /mnt/huge uses hugepages.  The uid and gid
+options sets the owner and group of the root of the file system.  By default
+the uid and gid of the current process are taken.  The mode option sets the
+mode of root of file system to value & 0777.  This value is given in octal.
+By default the value 0755 is picked.  An example is given at the end of this
+document.
 
 read and write system calls are not supported on files that reside on hugetlb
 file systems.
 
+A regular chown, chgrp and chmod commands (with right permissions) could be
+used to change the file attributes on hugetlbfs.
+
 Also, it is important to note that no such mount command is required if the
 applications are going to use only shmat/shmget system calls.  It is possible
 for same or different applications to use any combination of mmaps and shm*
......
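The uid/gid/mode options documented above can also be passed programmatically through the mount(2) data string. A minimal userspace sketch (not part of this patch; the mount point and the numeric IDs are arbitrary examples):

/*
 * Illustrative only: mount hugetlbfs with owner, group and mode options.
 * Requires CAP_SYS_ADMIN and an existing /mnt/huge directory.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
        /* equivalent of: mount none /mnt/huge -t hugetlbfs -o uid=1000,gid=1000,mode=0700 */
        if (mount("none", "/mnt/huge", "hugetlbfs", 0,
                  "uid=1000,gid=1000,mode=0700") != 0) {
                fprintf(stderr, "mount: %s\n", strerror(errno));
                return 1;
        }
        return 0;
}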
@@ -239,7 +239,7 @@ static unsigned int nr_thread_info;
 #define ll_alloc_task_struct() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define ll_free_task_struct(p) free_pages((unsigned long)(p),1)
 
-struct thread_info *alloc_thread_info(void)
+struct thread_info *alloc_thread_info(struct task_struct *task)
 {
     struct thread_info *thread = NULL;
......
@@ -121,6 +121,15 @@ static inline void deadline_del_drq_hash(struct deadline_rq *drq)
         __deadline_del_drq_hash(drq);
 }
 
+static void
+deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq)
+{
+    deadline_del_drq_hash(drq);
+
+    if (q->last_merge == &drq->request->queuelist)
+        q->last_merge = NULL;
+}
+
 static inline void
 deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
 {
@@ -310,7 +319,7 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
         struct deadline_data *dd = q->elevator.elevator_data;
 
         list_del_init(&drq->fifo);
-        deadline_del_drq_hash(drq);
+        deadline_remove_merge_hints(q, drq);
         deadline_del_drq_rb(dd, drq);
     }
 }
......
@@ -2860,7 +2860,7 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
     loff_t l = *pos;
     mddev_t *mddev;
 
-    if (l > 0x10000)
+    if (l >= 0x10000)
         return NULL;
     if (!l--)
         /* header */
@@ -2875,7 +2875,9 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
             return mddev;
         }
     spin_unlock(&all_mddevs_lock);
-    return (void*)2;/* tail */
+    if (!l--)
+        return (void*)2;/* tail */
+    return NULL;
 }
 
 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
......
@@ -85,10 +85,8 @@ static int create_strip_zones (mddev_t *mddev)
     conf->devlist = kmalloc(sizeof(mdk_rdev_t*)*
                 conf->nr_strip_zones*mddev->raid_disks,
                 GFP_KERNEL);
-    if (!conf->devlist) {
-        kfree(conf);
+    if (!conf->devlist)
         return 1;
-    }
 
     memset(conf->strip_zone, 0,sizeof(struct strip_zone)*
                 conf->nr_strip_zones);
@@ -193,8 +191,6 @@ static int create_strip_zones (mddev_t *mddev)
     printk("raid0: done.\n");
     return 0;
 abort:
-    kfree(conf->devlist);
-    kfree(conf->strip_zone);
     return 1;
 }
@@ -235,6 +231,8 @@ static int raid0_run (mddev_t *mddev)
         goto out;
     mddev->private = (void *)conf;
 
+    conf->strip_zone = NULL;
+    conf->devlist = NULL;
     if (create_strip_zones (mddev))
         goto out_free_conf;
@@ -273,7 +271,7 @@ static int raid0_run (mddev_t *mddev)
                 nb_zone*sizeof(struct strip_zone*));
     conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
     if (!conf->hash_table)
-        goto out_free_zone_conf;
+        goto out_free_conf;
     size = conf->strip_zone[cur].size;
 
     for (i=0; i< nb_zone; i++) {
@@ -296,12 +294,11 @@ static int raid0_run (mddev_t *mddev)
     blk_queue_merge_bvec(&mddev->queue, raid0_mergeable_bvec);
     return 0;
 
-out_free_zone_conf:
-    kfree(conf->strip_zone);
-    conf->strip_zone = NULL;
-
 out_free_conf:
-    kfree (conf->devlist);
+    if (conf->strip_zone)
+        kfree(conf->strip_zone);
+    if (conf->devlist)
+        kfree (conf->devlist);
     kfree(conf);
     mddev->private = NULL;
 out:
......
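The raid0 error-path change above works because conf->strip_zone and conf->devlist are initialised to NULL before create_strip_zones() runs, so a single out_free_conf label can release whatever was actually allocated. A minimal userspace sketch of that pattern, with made-up names:

/*
 * Sketch only: initialise every member to NULL up front so one cleanup
 * label can free whatever happened to be allocated (free(NULL) is a no-op).
 */
#include <stdlib.h>

struct zones {
        int *strip_zone;
        int *devlist;
};

static int zones_setup(struct zones *z, size_t nzones, size_t ndisks)
{
        z->strip_zone = NULL;
        z->devlist = NULL;

        z->strip_zone = calloc(nzones, sizeof(*z->strip_zone));
        if (!z->strip_zone)
                goto out_free;
        z->devlist = calloc(nzones * ndisks, sizeof(*z->devlist));
        if (!z->devlist)
                goto out_free;
        return 0;

out_free:
        free(z->devlist);       /* safe even if never allocated */
        free(z->strip_zone);
        return -1;
}

int main(void)
{
        struct zones z;
        return zones_setup(&z, 4, 2) ? 1 : 0;
}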
@@ -462,7 +462,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
     mirror_info_t *mirror;
     r1bio_t *r1_bio;
     struct bio *read_bio;
-    int i, sum_bios = 0, disks = conf->raid_disks;
+    int i, disks = conf->raid_disks;
 
     /*
      * Register the new request and wait if the reconstruction
@@ -525,6 +525,9 @@ static int make_request(request_queue_t *q, struct bio * bio)
             r1_bio->write_bios[i] = NULL;
     }
     spin_unlock_irq(&conf->device_lock);
+
+    atomic_set(&r1_bio->remaining, 1);
+    md_write_start(mddev);
     for (i = 0; i < disks; i++) {
         struct bio *mbio;
         if (!r1_bio->write_bios[i])
@@ -539,37 +542,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
         mbio->bi_rw = r1_bio->cmd;
         mbio->bi_private = r1_bio;
 
-        sum_bios++;
-    }
-
-    if (!sum_bios) {
-        /*
-         * If all mirrors are non-operational
-         * then return an IO error:
-         */
-        md_write_end(mddev);
-        raid_end_bio_io(r1_bio);
-        return 0;
-    }
-
-    atomic_set(&r1_bio->remaining, sum_bios+1);
-
-    /*
-     * We have to be a bit careful about the semaphore above, thats
-     * why we start the requests separately. Since generic_make_request()
-     * can sleep, this is the safer solution. Imagine, raid1_end_request
-     * decreasing the semaphore before we could have set it up ...
-     * We could play tricks with the semaphore (presetting it and
-     * correcting at the end if sum_bios is not 'n' but we have to
-     * do raid1_end_request by hand if all requests finish until we had a
-     * chance to set up the semaphore correctly ... lots of races).
-     */
-    md_write_start(mddev);
-    for (i=disks; i--; ) {
-        struct bio *mbio;
-        mbio = r1_bio->write_bios[i];
-        if (!mbio)
-            continue;
+        atomic_inc(&r1_bio->remaining);
+
         generic_make_request(mbio);
     }
@@ -802,7 +775,7 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
 static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 {
     conf_t *conf = mddev_to_conf(mddev);
-    int i, sum_bios = 0;
+    int i;
     int disks = conf->raid_disks;
     struct bio *bio, *mbio;
 
@@ -849,7 +822,8 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
     }
     spin_unlock_irq(&conf->device_lock);
 
-    for (i = 0; i < disks ; i++) {
+    atomic_set(&r1_bio->remaining, 1);
+    for (i = disks; i-- ; ) {
         if (!r1_bio->write_bios[i])
             continue;
         mbio = bio_clone(bio, GFP_NOIO);
@@ -860,32 +834,14 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
         mbio->bi_rw = WRITE;
         mbio->bi_private = r1_bio;
 
-        sum_bios++;
+        atomic_inc(&r1_bio->remaining);
+        md_sync_acct(conf->mirrors[i].rdev, mbio->bi_size >> 9);
+        generic_make_request(mbio);
     }
-    if (i != disks)
-        BUG();
-    atomic_set(&r1_bio->remaining, sum_bios);
 
     if (atomic_dec_and_test(&r1_bio->remaining)) {
-        if (!sum_bios) {
-            /*
-             * Nowhere to write this to... I guess we
-             * must be done
-             */
-            printk(KERN_ALERT "raid1: sync aborting as there is nowhere"
-                " to write sector %llu\n",
-                (unsigned long long)r1_bio->sector);
         md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0);
         put_buf(r1_bio);
-            return;
-        }
-        for (i = 0; i < disks ; i++) {
-            mbio = r1_bio->write_bios[i];
-            if (!mbio)
-                continue;
-            md_sync_acct(conf->mirrors[i].rdev, mbio->bi_size >> 9);
-            generic_make_request(mbio);
-        }
     }
 }
......
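Both raid1 hunks above replace the sum_bios bookkeeping with a pending count biased by one: remaining starts at 1, is incremented for every bio actually issued, and whoever performs the final decrement runs the completion, which sidesteps the race the deleted comment worried about. A userspace sketch of that accounting, with illustrative names only:

/*
 * Sketch of the "bias the pending count by one" pattern.  The bias keeps
 * the completion from firing until submission has finished, even if every
 * issued request completes immediately.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int remaining;

static void complete(void)
{
        puts("all writes finished");
}

static void end_request(void)
{
        /* the final decrement triggers the completion */
        if (atomic_fetch_sub(&remaining, 1) == 1)
                complete();
}

int main(void)
{
        atomic_store(&remaining, 1);            /* the bias */

        for (int i = 0; i < 3; i++) {
                atomic_fetch_add(&remaining, 1);
                end_request();                  /* request may finish before the loop does */
        }

        end_request();                          /* drop the bias; completes if nothing is pending */
        return 0;
}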
@@ -940,7 +940,7 @@ static void handle_stripe(struct stripe_head *sh)
             /* and fail all 'written' */
             bi = sh->dev[i].written;
             sh->dev[i].written = NULL;
-            while (bi && bi->bi_sector < dev->sector + STRIPE_SECTORS) {
+            while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
                 struct bio *bi2 = bi->bi_next;
                 clear_bit(BIO_UPTODATE, &bi->bi_flags);
                 if (--bi->bi_phys_segments == 0) {
......
@@ -389,9 +389,6 @@ static int cadet_do_ioctl(struct inode *inode, struct file *file,
                 v->flags|=VIDEO_TUNER_STEREO_ON;
             }
             v->flags|=cadet_getrds();
-            if(copy_to_user(arg,&v, sizeof(v))) {
-                return -EFAULT;
-            }
             break;
         case 1:
             strcpy(v->name,"AM");
@@ -402,9 +399,6 @@ static int cadet_do_ioctl(struct inode *inode, struct file *file,
             v->mode=0;
             v->mode|=VIDEO_MODE_AUTO;
             v->signal=sigstrength;
-            if(copy_to_user(arg,&v, sizeof(v))) {
-                return -EFAULT;
-            }
             break;
         }
         return 0;
......
@@ -723,8 +723,6 @@ static int qcam_do_ioctl(struct inode *inode, struct file *file,
             /* Good question.. its composite or SVHS so.. */
             v->type = VIDEO_TYPE_CAMERA;
             strcpy(v->name, "Camera");
-            if(copy_to_user(arg, &v, sizeof(v)))
-                return -EFAULT;
             return 0;
         }
         case VIDIOCSCHAN:
......
@@ -1693,12 +1693,12 @@ long vbi_read(struct video_device* dev, char* buf, unsigned long count, int nonb
             for (x=0; optr+1<eptr && x<-done->w; x++)
             {
                 unsigned char a = iptr[x*2];
-                *optr++ = a;
-                *optr++ = a;
+                __put_user(a, optr++);
+                __put_user(a, optr++);
             }
 
             /* and clear the rest of the line */
             for (x*=2; optr<eptr && x<done->bpl; x++)
-                *optr++ = 0;
+                __put_user(0, optr++);
 
             /* next line */
             iptr += done->bpl;
@@ -1715,10 +1715,10 @@ long vbi_read(struct video_device* dev, char* buf, unsigned long count, int nonb
         {
             /* copy to doubled data to userland */
             for (x=0; optr<eptr && x<-done->w; x++)
-                *optr++ = iptr[x*2];
+                __put_user(iptr[x*2], optr++);
             /* and clear the rest of the line */
             for (;optr<eptr && x<done->bpl; x++)
-                *optr++ = 0;
+                __put_user(0, optr++);
             /* next line */
             iptr += done->bpl;
         }
@@ -1727,7 +1727,7 @@ long vbi_read(struct video_device* dev, char* buf, unsigned long count, int nonb
         /* API compliance:
          * place the framenumber (half fieldnr) in the last long
          */
-        ((ulong*)eptr)[-1] = done->fieldnr/2;
+        __put_user(done->fieldnr/2, ((ulong*)eptr)[-1]);
     }
 
     /* keep the engine running */
......
@@ -29,6 +29,8 @@
 #include <linux/pnpbios.h>
 #include <linux/init.h>
 
+#include <asm/uaccess.h>
+
 static struct proc_dir_entry *proc_pnp = NULL;
 static struct proc_dir_entry *proc_pnp_boot = NULL;
@@ -178,18 +180,31 @@ static int proc_write_node(struct file *file, const char *buf,
     struct pnp_bios_node *node;
     int boot = (long)data >> 8;
     u8 nodenum = (long)data;
+    int ret = count;
 
     node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL);
-    if (!node) return -ENOMEM;
-    if ( pnp_bios_get_dev_node(&nodenum, boot, node) )
-        return -EIO;
-    if (count != node->size - sizeof(struct pnp_bios_node))
-        return -EINVAL;
-    memcpy(node->data, buf, count);
-    if (pnp_bios_set_dev_node(node->handle, boot, node) != 0)
-        return -EINVAL;
+    if (!node)
+        return -ENOMEM;
+    if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
+        ret = -EIO;
+        goto out;
+    }
+    if (count != node->size - sizeof(struct pnp_bios_node)) {
+        ret = -EINVAL;
+        goto out;
+    }
+    if (copy_from_user(node->data, buf, count)) {
+        ret = -EFAULT;
+        goto out;
+    }
+    if (pnp_bios_set_dev_node(node->handle, boot, node) != 0) {
+        ret = -EINVAL;
+        goto out;
+    }
+    ret = count;
+out:
     kfree(node);
-    return count;
+    return ret;
 }
 
 int pnpbios_interface_attach_device(struct pnp_bios_node * node)
......
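The proc_write_node rework above funnels every failure through one out: label so the node buffer is freed exactly once, and replaces the raw memcpy from a user pointer with copy_from_user(). A userspace sketch of that single-exit style; parse_record() and its checks are invented for the example:

/*
 * Sketch only: keep the result in ret, jump to one label on every failure,
 * and free the buffer on all paths.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_record(const char *buf, size_t count)
{
        char *node;
        int ret = (int)count;

        node = malloc(256);
        if (!node)
                return -ENOMEM;

        if (count > 256) {              /* reject oversized input */
                ret = -EINVAL;
                goto out;
        }
        if (memchr(buf, '\0', count)) { /* stand-in for a validation failure */
                ret = -EIO;
                goto out;
        }
        memcpy(node, buf, count);       /* in the kernel this is copy_from_user() */
out:
        free(node);                     /* runs on success and on every error path */
        return ret;
}

int main(void)
{
        printf("%d\n", parse_record("hello", 5));
        return 0;
}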
@@ -252,7 +252,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
     return ctx;
 
 out_cleanup:
-    atomic_sub(ctx->max_reqs, &aio_nr);    /* undone by __put_ioctx */
+    atomic_sub(ctx->max_reqs, &aio_nr);
     ctx->max_reqs = 0;    /* prevent __put_ioctx from sub'ing aio_nr */
     __put_ioctx(ctx);
     return ERR_PTR(-EAGAIN);
@@ -405,9 +405,6 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx)
         list_add(&req->ki_list, &ctx->active_reqs);
         get_ioctx(ctx);
         ctx->reqs_active++;
-        req->ki_user_obj = NULL;
-        req->ki_ctx = ctx;
-        req->ki_users = 1;
         okay = 1;
     }
     kunmap_atomic(ring, KM_USER0);
@@ -949,7 +946,7 @@ asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t *ctxp)
         goto out;
 
     ret = -EINVAL;
-    if (unlikely(ctx || !nr_events || (int)nr_events < 0)) {
+    if (unlikely(ctx || (int)nr_events <= 0)) {
         pr_debug("EINVAL: io_setup: ctx or nr_events > max\n");
         goto out;
     }
@@ -984,9 +981,7 @@ asmlinkage long sys_io_destroy(aio_context_t ctx)
     return -EINVAL;
 }
 
-int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb *user_iocb,
-            struct iocb *iocb));
-int io_submit_one(struct kioctx *ctx, struct iocb *user_iocb,
+int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
           struct iocb *iocb)
 {
     struct kiocb *req;
@@ -1098,7 +1093,7 @@ int io_submit_one(struct kioctx *ctx, struct iocb *user_iocb,
  *    fail with -ENOSYS if not implemented.
  */
 asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
-                  struct iocb **iocbpp)
+                  struct iocb __user **iocbpp)
 {
     struct kioctx *ctx;
     long ret = 0;
@@ -1116,8 +1111,13 @@ asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
         return -EINVAL;
     }
 
+    /*
+     * AKPM: should this return a partial result if some of the IOs were
+     * successfully submitted?
+     */
     for (i=0; i<nr; i++) {
-        struct iocb *user_iocb, tmp;
+        struct iocb __user *user_iocb;
+        struct iocb tmp;
 
         if (unlikely(__get_user(user_iocb, iocbpp + i))) {
             ret = -EFAULT;
......
@@ -822,9 +822,6 @@ int __set_page_dirty_buffers(struct page *page)
         goto out;
     }
 
-    if (!PageUptodate(page))
-        buffer_error();
-
     spin_lock(&mapping->private_lock);
     if (page_has_buffers(page)) {
         struct buffer_head *head = page_buffers(page);
......
@@ -1710,6 +1710,13 @@ void devfs_remove(const char *fmt, ...)
     if (n < 64 && buf[0]) {
         devfs_handle_t de = _devfs_find_entry(NULL, buf, 0);
 
+        if (!de) {
+            printk(KERN_ERR "%s: %s not found, cannot remove\n",
+                   __FUNCTION__, buf);
+            dump_stack();
+            return;
+        }
+
         write_lock(&de->parent->u.dir.lock);
         _devfs_unregister(de->parent, de);
         devfs_put(de);
......
@@ -48,9 +48,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
     loff_t len;
     int ret;
 
-    if (!capable(CAP_IPC_LOCK))
-        return -EPERM;
-
     if (vma->vm_start & ~HPAGE_MASK)
         return -EINVAL;
@@ -231,6 +228,7 @@ static void hugetlbfs_forget_inode(struct inode *inode)
     spin_unlock(&inode_lock);
     if (inode->i_data.nrpages)
         truncate_hugepages(&inode->i_data, 0);
+
     clear_inode(inode);
     destroy_inode(inode);
 }
@@ -317,7 +315,6 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
     struct inode *inode = dentry->d_inode;
     int error;
     unsigned int ia_valid = attr->ia_valid;
-    unsigned long dn_mask;
 
     BUG_ON(!inode);
@@ -335,26 +332,21 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
         if (error)
             goto out;
         attr->ia_valid &= ~ATTR_SIZE;
-        error = inode_setattr(inode, attr);
     }
-    if (error)
-        goto out;
-
-    dn_mask = setattr_mask(ia_valid);
-    if (dn_mask)
-        dnotify_parent(dentry, dn_mask);
+    error = inode_setattr(inode, attr);
 out:
     return error;
 }
 
-static struct inode *hugetlbfs_get_inode(struct super_block *sb,
-                    int mode, dev_t dev)
+static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
+                    gid_t gid, int mode, dev_t dev)
 {
     struct inode * inode = new_inode(sb);
 
     if (inode) {
         inode->i_mode = mode;
-        inode->i_uid = current->fsuid;
-        inode->i_gid = current->fsgid;
+        inode->i_uid = uid;
+        inode->i_gid = gid;
         inode->i_blksize = PAGE_CACHE_SIZE;
         inode->i_blocks = 0;
         inode->i_rdev = NODEV;
@@ -391,7 +383,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
 static int hugetlbfs_mknod(struct inode *dir,
             struct dentry *dentry, int mode, dev_t dev)
 {
-    struct inode * inode = hugetlbfs_get_inode(dir->i_sb, mode, dev);
+    struct inode * inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid,
+                    current->fsgid, mode, dev);
     int error = -ENOSPC;
 
     if (inode) {
@@ -421,7 +414,8 @@ static int hugetlbfs_symlink(struct inode *dir,
     struct inode *inode;
     int error = -ENOSPC;
 
-    inode = hugetlbfs_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
+    inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid,
+                    current->fsgid, S_IFLNK|S_IRWXUGO, 0);
     if (inode) {
         int l = strlen(symname)+1;
         error = page_symlink(inode, symname, l);
@@ -478,16 +472,68 @@ static struct super_operations hugetlbfs_ops = {
 };
 
 static int
-hugetlbfs_fill_super(struct super_block * sb, void * data, int silent)
+hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
+{
+    char *opt, *value;
+    int ret = 0;
+
+    if (!options)
+        goto out;
+    while ((opt = strsep(&options, ",")) != NULL) {
+        if (!*opt)
+            continue;
+
+        value = strchr(opt, '=');
+        if (!value || !*value) {
+            ret = -EINVAL;
+            goto out;
+        } else {
+            *value++ = '\0';
+        }
+
+        if (!strcmp(opt, "uid"))
+            pconfig->uid = simple_strtoul(value, &value, 0);
+        else if (!strcmp(opt, "gid"))
+            pconfig->gid = simple_strtoul(value, &value, 0);
+        else if (!strcmp(opt, "mode"))
+            pconfig->mode = simple_strtoul(value,&value,0) & 0777U;
+        else {
+            ret = -EINVAL;
+            goto out;
+        }
+
+        if (*value) {
+            ret = -EINVAL;
+            goto out;
+        }
+    }
+    return 0;
+
+out:
+    pconfig->uid = current->fsuid;
+    pconfig->gid = current->fsgid;
+    pconfig->mode = 0755;
+    return ret;
+}
+
+static int
+hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
 {
     struct inode * inode;
     struct dentry * root;
+    int ret;
+    struct hugetlbfs_config config;
+
+    ret = hugetlbfs_parse_options(data, &config);
+    if (ret) {
+        printk("hugetlbfs: invalid mount options: %s.\n", data);
+        return ret;
+    }
+
     sb->s_blocksize = PAGE_CACHE_SIZE;
     sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
     sb->s_magic = HUGETLBFS_MAGIC;
     sb->s_op = &hugetlbfs_ops;
-    inode = hugetlbfs_get_inode(sb, S_IFDIR | 0755, 0);
+    inode = hugetlbfs_get_inode(sb, config.uid, config.gid,
+                    S_IFDIR | config.mode, 0);
     if (!inode)
         return -ENOMEM;
@@ -548,7 +594,8 @@ struct file *hugetlb_zero_setup(size_t size)
         goto out_dentry;
 
     error = -ENOSPC;
-    inode = hugetlbfs_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
+    inode = hugetlbfs_get_inode(root->d_sb, current->fsuid,
+                current->fsgid, S_IFREG | S_IRWXUGO, 0);
     if (!inode)
         goto out_file;
......
@@ -916,7 +916,7 @@ int journal_create(journal_t *journal)
         __brelse(bh);
     }
 
-    fsync_bdev(journal->j_dev);
+    sync_blockdev(journal->j_dev);
     jbd_debug(1, "JBD: journal cleared.\n");
 
     /* OK, fill in the initial static fields in the new superblock */
......
@@ -736,6 +736,7 @@ int reiserfs_prepare_file_region_for_write(
     struct buffer_head *itembuf=NULL; // Buffer head that contains items that we are going to deal with
     INITIALIZE_PATH(path); // path to item, that we are going to deal with.
     __u32 * item=0; // pointer to item we are going to deal with
+    int item_pos=-1; /* Position in indirect item */
 
     if ( num_pages < 1 ) {
@@ -807,7 +808,6 @@ int reiserfs_prepare_file_region_for_write(
     reiserfs_write_lock(inode->i_sb); // We need that for at least search_by_key()
 
     for ( i = 0; i < num_pages ; i++ ) {
-        int item_pos=-1; /* Position in indirect item */
         head = page_buffers(prepared_pages[i]);
         /* For each buffer in the page */
......
@@ -2109,11 +2109,13 @@ static int reiserfs_write_full_page(struct page *page, struct writeback_control
      * someone else could have locked them and sent them down the
      * pipe without locking the page
      */
+    bh = head ;
     do {
         if (!buffer_uptodate(bh)) {
             partial = 1;
             break;
         }
+        bh = bh->b_this_page;
     } while(bh != head);
     if (!partial)
         SetPageUptodate(page);
......
@@ -546,12 +546,13 @@ static int print_desc_block (struct buffer_head * bh)
 {
     struct reiserfs_journal_desc * desc;
 
-    desc = (struct reiserfs_journal_desc *)(bh->b_data);
-    if (memcmp(desc->j_magic, JOURNAL_DESC_MAGIC, 8))
+    if (memcmp(get_journal_desc_magic (bh), JOURNAL_DESC_MAGIC, 8))
         return 1;
 
+    desc = (struct reiserfs_journal_desc *)(bh->b_data);
     printk ("Desc block %llu (j_trans_id %d, j_mount_id %d, j_len %d)",
-        (unsigned long long)bh->b_blocknr, desc->j_trans_id, desc->j_mount_id, desc->j_len);
+        (unsigned long long)bh->b_blocknr, get_desc_trans_id (desc), get_desc_mount_id (desc),
+        get_desc_trans_len (desc));
 
     return 0;
 }
......
@@ -23,9 +23,6 @@
 #include <linux/buffer_head.h>
 #include <linux/vfs.h>
 
-#define REISERFS_OLD_BLOCKSIZE 4096
-#define REISERFS_SUPER_MAGIC_STRING_OFFSET_NJ 20
-
 static struct file_system_type reiserfs_fs_type;
 
 const char reiserfs_3_5_magic_string[] = REISERFS_SUPER_MAGIC_STRING;
@@ -500,8 +497,11 @@ static struct export_operations reiserfs_export_ops = {
    mount options that have values rather than being toggles. */
 typedef struct {
     char * value;
-    int bitmask; /* bit which is to be set in mount_options bitmask when this
-                    value is found, 0 is no bits are to be set */
+    int setmask; /* bitmask which is to set on mount_options bitmask when this
+                    value is found, 0 is no bits are to be changed. */
+    int clrmask; /* bitmask which is to clear on mount_options bitmask when this
+                    value is found, 0 is no bits are to be changed. This is
+                    applied BEFORE setmask */
 } arg_desc_t;
@@ -511,25 +511,30 @@ typedef struct {
     char * option_name;
     int arg_required; /* 0 if argument is not required, not 0 otherwise */
     const arg_desc_t * values; /* list of values accepted by an option */
-    int bitmask; /* bit which is to be set in mount_options bitmask when this
-                    option is selected, 0 is not bits are to be set */
+    int setmask; /* bitmask which is to set on mount_options bitmask when this
+                    value is found, 0 is no bits are to be changed. */
+    int clrmask; /* bitmask which is to clear on mount_options bitmask when this
+                    value is found, 0 is no bits are to be changed. This is
+                    applied BEFORE setmask */
 } opt_desc_t;
 
 /* possible values for "-o block-allocator=" and bits which are to be set in
    s_mount_opt of reiserfs specific part of in-core super block */
 static const arg_desc_t balloc[] = {
-    {"noborder", REISERFS_NO_BORDER},
-    {"no_unhashed_relocation", REISERFS_NO_UNHASHED_RELOCATION},
-    {"hashed_relocation", REISERFS_HASHED_RELOCATION},
-    {"test4", REISERFS_TEST4},
-    {NULL, -1}
+    {"noborder", 1<<REISERFS_NO_BORDER, 0},
+    {"border", 0, 1<<REISERFS_NO_BORDER},
+    {"no_unhashed_relocation", 1<<REISERFS_NO_UNHASHED_RELOCATION, 0},
+    {"hashed_relocation", 1<<REISERFS_HASHED_RELOCATION, 0},
+    {"test4", 1<<REISERFS_TEST4, 0},
+    {"notest4", 0, 1<<REISERFS_TEST4},
+    {NULL, 0, 0}
 };
 
 static const arg_desc_t tails[] = {
-    {"on", REISERFS_LARGETAIL},
-    {"off", -1},
-    {"small", REISERFS_SMALLTAIL},
-    {NULL, 0}
+    {"on", 1<<REISERFS_LARGETAIL, 1<<REISERFS_SMALLTAIL},
+    {"off", 0, (1<<REISERFS_LARGETAIL)|(1<<REISERFS_SMALLTAIL)},
+    {"small", 1<<REISERFS_SMALLTAIL, 1<<REISERFS_LARGETAIL},
+    {NULL, 0, 0}
 };
 
 int reiserfs_default_io_size = 128 * 1024; /* Default recommended I/O size is 128k.
@@ -571,16 +576,21 @@ static int reiserfs_getopt ( struct super_block * s, char ** cur, opt_desc_t * o
         /* Ugly special case, probably we should redo options parser so that
            it can understand several arguments for some options, also so that
            it can fill several bitfields with option values. */
-        reiserfs_parse_alloc_options( s, p + 6);
-        return 0;
+        if ( reiserfs_parse_alloc_options( s, p + 6) ) {
+            return -1;
+        } else {
+            return 0;
+        }
     }
 
     /* for every option in the list */
     for (opt = opts; opt->option_name; opt ++) {
         if (!strncmp (p, opt->option_name, strlen (opt->option_name))) {
-            if (bit_flags && opt->bitmask != -1)
-                set_bit (opt->bitmask, bit_flags);
+            if (bit_flags) {
+                *bit_flags &= ~opt->clrmask;
+                *bit_flags |= opt->setmask;
+            }
             break;
         }
     }
@@ -620,7 +630,7 @@ static int reiserfs_getopt ( struct super_block * s, char ** cur, opt_desc_t * o
     }
 
     if (!opt->values) {
-        /* *opt_arg contains pointer to argument */
+        /* *=NULLopt_arg contains pointer to argument */
         *opt_arg = p;
         return opt->arg_required;
     }
@@ -628,8 +638,10 @@ static int reiserfs_getopt ( struct super_block * s, char ** cur, opt_desc_t * o
     /* values possible for this option are listed in opt->values */
     for (arg = opt->values; arg->value; arg ++) {
         if (!strcmp (p, arg->value)) {
-            if (bit_flags && arg->bitmask != -1 )
-                set_bit (arg->bitmask, bit_flags);
+            if (bit_flags) {
+                *bit_flags &= ~arg->clrmask;
+                *bit_flags |= arg->setmask;
+            }
             return opt->arg_required;
         }
     }
@@ -638,7 +650,6 @@ static int reiserfs_getopt ( struct super_block * s, char ** cur, opt_desc_t * o
     return -1;
 }
 
-
 /* returns 0 if something is wrong in option string, 1 - otherwise */
 static int reiserfs_parse_options (struct super_block * s, char * options, /* string given via mount's -o */
                                    unsigned long * mount_options,
@@ -652,18 +663,18 @@ static int reiserfs_parse_options (struct super_block * s, char * options, /* st
     char * arg = NULL;
     char * pos;
     opt_desc_t opts[] = {
-        {"tails", 't', tails, -1},
-        {"notail", 0, 0, -1}, /* Compatibility stuff, so that -o notail
-                                 for old setups still work */
-        {"conv", 0, 0, REISERFS_CONVERT},
-        {"attrs", 0, 0, REISERFS_ATTRS},
-        {"nolog", 0, 0, -1},
-        {"replayonly", 0, 0, REPLAYONLY},
-        {"block-allocator", 'a', balloc, -1},
-        {"resize", 'r', 0, -1},
-        {"jdev", 'j', 0, -1},
-        {"nolargeio", 'w', 0, -1},
-        {NULL, 0, 0, -1}
+        {"tails", 't', tails, 0, 0}, /* Compatibility stuff, so that -o notail for old setups still work */
+        {"notail", 0, 0, 0, (1<<REISERFS_LARGETAIL)|(1<<REISERFS_SMALLTAIL)},
+        {"conv", 0, 0, 1<<REISERFS_CONVERT, 0},
+        {"attrs", 0, 0, 1<<REISERFS_ATTRS, 0},
+        {"noattrs", 0, 0, 0, 1<<REISERFS_ATTRS},
+        {"nolog", 0, 0, 0, 0}, /* This is unsupported */
+        {"replayonly", 0, 0, 1<<REPLAYONLY, 0},
+        {"block-allocator", 'a', balloc, 0, 0},
+        {"resize", 'r', 0, 0, 0},
+        {"jdev", 'j', 0, 0, 0},
+        {"nolargeio", 'w', 0, 0, 0},
+        {NULL, 0, 0, 0, 0}
     };
 
     *blocks = 0;
@@ -671,9 +682,6 @@ static int reiserfs_parse_options (struct super_block * s, char * options, /* st
         /* use default configuration: create tails, journaling on, no
            conversion to newest format */
         return 1;
-    else
-        /* Drop defaults to zeroes */
-        *mount_options = 0;
 
     for (pos = options; pos; ) {
         c = reiserfs_getopt (s, &pos, opts, &arg, mount_options);
@@ -695,11 +703,25 @@ static int reiserfs_parse_options (struct super_block * s, char * options, /* st
         }
 
         if ( c == 'w' ) {
-            reiserfs_default_io_size = PAGE_SIZE;
+            char *p=0;
+            int val = simple_strtoul (arg, &p, 0);
+
+            if ( *p != '\0') {
+                printk ("reiserfs_parse_options: non-numeric value %s for nolargeio option\n", arg);
+                return 0;
+            }
+            if ( val )
+                reiserfs_default_io_size = PAGE_SIZE;
+            else
+                reiserfs_default_io_size = 128 * 1024;
         }
 
         if (c == 'j') {
             if (arg && *arg && jdev_name) {
+                if ( *jdev_name ) { //Hm, already assigned?
+                    printk("reiserfs_parse_options: journal device was already specified to be %s\n", *jdev_name);
+                    return 0;
+                }
                 *jdev_name = arg;
             }
         }
@@ -731,14 +753,28 @@ static int reiserfs_remount (struct super_block * s, int * mount_flags, char * a
     struct reiserfs_super_block * rs;
     struct reiserfs_transaction_handle th ;
     unsigned long blocks;
-    unsigned long mount_options;
+    unsigned long mount_options = REISERFS_SB(s)->s_mount_opt;
+    unsigned long safe_mask = 0;
 
     rs = SB_DISK_SUPER_BLOCK (s);
 
     if (!reiserfs_parse_options(s, arg, &mount_options, &blocks, NULL))
         return -EINVAL;
 
-    handle_attrs( s );
+    handle_attrs(s);
+
+    /* Add options that are safe here */
+    safe_mask |= 1 << REISERFS_SMALLTAIL;
+    safe_mask |= 1 << REISERFS_LARGETAIL;
+    safe_mask |= 1 << REISERFS_NO_BORDER;
+    safe_mask |= 1 << REISERFS_NO_UNHASHED_RELOCATION;
+    safe_mask |= 1 << REISERFS_HASHED_RELOCATION;
+    safe_mask |= 1 << REISERFS_TEST4;
+    safe_mask |= 1 << REISERFS_ATTRS;
+
+    /* Update the bitmask, taking care to keep
+     * the bits we're not allowed to change here */
+    REISERFS_SB(s)->s_mount_opt = (REISERFS_SB(s)->s_mount_opt & ~safe_mask) | (mount_options & safe_mask);
 
     if(blocks) {
         int rc = reiserfs_resize(s, blocks);
......
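The setmask/clrmask comments above state that the clear mask is applied before the set mask, and the remount path only lets bits inside safe_mask change. A small sketch of that arithmetic, with arbitrary bit numbers rather than the real reiserfs option bits:

/* Sketch only: clrmask before setmask, then a remount merge filtered by safe_mask. */
#include <stdio.h>

#define OPT_LARGETAIL 0
#define OPT_SMALLTAIL 1
#define OPT_ATTRS     2

struct arg_desc {
        unsigned long setmask;
        unsigned long clrmask;
};

static void apply(unsigned long *flags, const struct arg_desc *arg)
{
        *flags &= ~arg->clrmask;        /* clrmask is applied BEFORE setmask */
        *flags |= arg->setmask;
}

int main(void)
{
        /* "-o tails=small": set SMALLTAIL, clear LARGETAIL */
        struct arg_desc small = { 1UL << OPT_SMALLTAIL, 1UL << OPT_LARGETAIL };
        unsigned long mount_opt = 1UL << OPT_LARGETAIL;

        apply(&mount_opt, &small);

        /* remount: bits outside safe_mask stay exactly as they were */
        unsigned long safe_mask = (1UL << OPT_SMALLTAIL) | (1UL << OPT_LARGETAIL);
        unsigned long requested = mount_opt | (1UL << OPT_ATTRS);  /* ATTRS not safe here, so ignored */
        mount_opt = (mount_opt & ~safe_mask) | (requested & safe_mask);

        printf("mount_opt = %#lx\n", mount_opt);
        return 0;
}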
@@ -51,7 +51,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
 /* Thread information allocation.  */
 #define THREAD_SIZE (2*PAGE_SIZE)
-#define alloc_thread_info() \
+#define alloc_thread_info(tsk) \
   ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
 #define get_thread_info(ti) get_task_struct((ti)->task)
......
@@ -84,7 +84,7 @@ static inline struct thread_info *current_thread_info(void)
 #define THREAD_SIZE (8192)
 
-extern struct thread_info *alloc_thread_info(void);
+extern struct thread_info *alloc_thread_info(struct task_struct *task);
 extern void free_thread_info(struct thread_info *);
 #define get_thread_info(ti) get_task_struct((ti)->task)
......
@@ -65,7 +65,7 @@ static inline struct thread_info *current_thread_info(void)
 }
 
 /* thread information allocation */
-#define alloc_thread_info() ((struct thread_info *) \
+#define alloc_thread_info(tsk) ((struct thread_info *) \
     __get_free_pages(GFP_KERNEL, 1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
 #define get_thread_info(ti) get_task_struct((ti)->task)
......
@@ -87,7 +87,7 @@ static inline struct thread_info *current_thread_info(void)
 /* thread information allocation */
 #define THREAD_SIZE (2*PAGE_SIZE)
-#define alloc_thread_info() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
+#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
 #define get_thread_info(ti) get_task_struct((ti)->task)
 #define put_thread_info(ti) put_task_struct((ti)->task)
......
@@ -28,10 +28,10 @@ struct thread_info {
 /* THREAD_SIZE should be 8k, so handle differently for 4k and 8k machines */
 #if PAGE_SHIFT == 13 /* 8k machines */
-#define alloc_thread_info() ((struct thread_info *)__get_free_pages(GFP_KERNEL,0))
+#define alloc_thread_info(tsk) ((struct thread_info *)__get_free_pages(GFP_KERNEL,0))
 #define free_thread_info(ti) free_pages((unsigned long)(ti),0)
 #else /* otherwise assume 4k pages */
-#define alloc_thread_info() ((struct thread_info *)__get_free_pages(GFP_KERNEL,1))
+#define alloc_thread_info(tsk) ((struct thread_info *)__get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long)(ti),1)
 #endif /* PAGE_SHIFT == 13 */
......
@@ -65,7 +65,7 @@ static inline struct thread_info *current_thread_info(void)
 }
 
 /* thread information allocation */
-#define alloc_thread_info() ((struct thread_info *) \
+#define alloc_thread_info(tsk) ((struct thread_info *) \
     __get_free_pages(GFP_KERNEL, 1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
 #define get_thread_info(ti) get_task_struct((ti)->task)
......
@@ -37,7 +37,7 @@ struct thread_info {
 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
 #define THREAD_SHIFT (PAGE_SHIFT + THREAD_ORDER)
-#define alloc_thread_info() ((struct thread_info *) \
+#define alloc_thread_info(tsk) ((struct thread_info *) \
     __get_free_pages(GFP_KERNEL, THREAD_ORDER))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
 #define get_thread_info(ti) get_task_struct((ti)->task)
......
@@ -54,7 +54,7 @@ static inline struct thread_info *current_thread_info(void)
 }
 
 /* thread information allocation */
-#define alloc_thread_info() ((struct thread_info *) \
+#define alloc_thread_info(tsk) ((struct thread_info *) \
     __get_free_pages(GFP_KERNEL, 1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
 #define get_thread_info(ti) get_task_struct((ti)->task)
......
@@ -52,7 +52,7 @@ struct thread_info {
 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
 #define THREAD_SHIFT (PAGE_SHIFT + THREAD_ORDER)
-#define alloc_thread_info() ((struct thread_info *) \
+#define alloc_thread_info(tsk) ((struct thread_info *) \
     __get_free_pages(GFP_KERNEL, THREAD_ORDER))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
 #define get_thread_info(ti) get_task_struct((ti)->task)
......
@@ -68,7 +68,7 @@ static inline struct thread_info *current_thread_info(void)
 }
 
 /* thread information allocation */
-#define alloc_thread_info() ((struct thread_info *) \
+#define alloc_thread_info(tsk) ((struct thread_info *) \
     __get_free_pages(GFP_KERNEL,THREAD_ORDER))
 #define free_thread_info(ti) free_pages((unsigned long) (ti),THREAD_ORDER)
 #define get_thread_info(ti) get_task_struct((ti)->task)
......
@@ -78,7 +78,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #endif
 
 BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
-#define alloc_thread_info() BTFIXUP_CALL(alloc_thread_info)()
+#define alloc_thread_info(tsk) BTFIXUP_CALL(alloc_thread_info)()
 BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
......
@@ -142,10 +142,10 @@ register struct thread_info *current_thread_info_reg asm("g6");
 /* thread information allocation */
 #if PAGE_SHIFT == 13
-#define alloc_thread_info() ((struct thread_info *)__get_free_pages(GFP_KERNEL, 1))
+#define alloc_thread_info(tsk)((struct thread_info *)__get_free_pages(GFP_KERNEL, 1))
 #define free_thread_info(ti) free_pages((unsigned long)(ti),1)
 #else /* PAGE_SHIFT == 13 */
-#define alloc_thread_info() ((struct thread_info *)__get_free_pages(GFP_KERNEL, 0))
+#define alloc_thread_info(tsk)((struct thread_info *)__get_free_pages(GFP_KERNEL, 0))
 #define free_thread_info(ti) free_pages((unsigned long)(ti),0)
 #endif /* PAGE_SHIFT == 13 */
......
@@ -49,7 +49,7 @@ static inline struct thread_info *current_thread_info(void)
 /* thread information allocation */
 #define THREAD_SIZE (4*PAGE_SIZE)
-#define alloc_thread_info() ((struct thread_info *) \
+#define alloc_thread_info(tsk) ((struct thread_info *) \
     __get_free_pages(GFP_KERNEL,2))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 2)
 #define get_thread_info(ti) get_task_struct((ti)->task)
......
@@ -54,7 +54,7 @@ struct thread_info {
  */
 /* thread information allocation */
-#define alloc_thread_info() ((struct thread_info *) \
+#define alloc_thread_info(tsk) ((struct thread_info *) \
     __get_free_pages(GFP_KERNEL, 1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
 #define get_thread_info(ti) get_task_struct((ti)->task)
......
@@ -73,7 +73,7 @@ static inline struct thread_info *stack_thread_info(void)
 }
 
 /* thread information allocation */
-#define alloc_thread_info() \
+#define alloc_thread_info(tsk) \
     ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
 #define get_thread_info(ti) get_task_struct((ti)->task)
......
@@ -41,9 +41,9 @@ struct kioctx;
 #define kiocbClearKicked(iocb)    clear_bit(KIF_KICKED, &(iocb)->ki_flags)
 #define kiocbClearCancelled(iocb)    clear_bit(KIF_CANCELLED, &(iocb)->ki_flags)
 
-#define kiocbIsLocked(iocb)    test_bit(0, &(iocb)->ki_flags)
-#define kiocbIsKicked(iocb)    test_bit(1, &(iocb)->ki_flags)
-#define kiocbIsCancelled(iocb)    test_bit(2, &(iocb)->ki_flags)
+#define kiocbIsLocked(iocb)    test_bit(KIF_LOCKED, &(iocb)->ki_flags)
+#define kiocbIsKicked(iocb)    test_bit(KIF_KICKED, &(iocb)->ki_flags)
+#define kiocbIsCancelled(iocb)    test_bit(KIF_CANCELLED, &(iocb)->ki_flags)
 
 struct kiocb {
     struct list_head    ki_run_list;
......
@@ -72,6 +72,12 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 #endif /* !CONFIG_HUGETLB_PAGE */
 
 #ifdef CONFIG_HUGETLBFS
+struct hugetlbfs_config {
+    uid_t   uid;
+    gid_t   gid;
+    umode_t mode;
+};
+
 extern struct file_operations hugetlbfs_file_operations;
 extern struct vm_operations_struct hugetlb_vm_ops;
 struct file *hugetlb_zero_setup(size_t);
......
@@ -1631,29 +1631,43 @@ struct reiserfs_iget_args {
 /***************************************************************************/
 /*#ifdef __KERNEL__*/
 
+#define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12)
 
-/* journal.c see journal.c for all the comments here */
+#define journal_trans_half(blocksize) \
+    ((blocksize - sizeof (struct reiserfs_journal_desc) + sizeof (__u32) - 12) / sizeof (__u32))
 
-#define JOURNAL_TRANS_HALF 1018   /* must be correct to keep the desc and commit structs at 4k */
+/* journal.c see journal.c for all the comments here */
 
 /* first block written in a commit.  */
 struct reiserfs_journal_desc {
   __u32 j_trans_id ;            /* id of commit */
   __u32 j_len ;            /* length of commit. len +1 is the commit block */
   __u32 j_mount_id ;                /* mount id of this trans*/
-  __u32 j_realblock[JOURNAL_TRANS_HALF] ; /* real locations for each block */
-  char j_magic[12] ;
+  __u32 j_realblock[1] ; /* real locations for each block */
 } ;
 
+#define get_desc_trans_id(d)   le32_to_cpu((d)->j_trans_id)
+#define get_desc_trans_len(d)  le32_to_cpu((d)->j_len)
+#define get_desc_mount_id(d)   le32_to_cpu((d)->j_mount_id)
+
+#define set_desc_trans_id(d,val)   do { (d)->j_trans_id = cpu_to_le32 (val); } while (0)
+#define set_desc_trans_len(d,val)  do { (d)->j_len = cpu_to_le32 (val); } while (0)
+#define set_desc_mount_id(d,val)   do { (d)->j_mount_id = cpu_to_le32 (val); } while (0)
+
 /* last block written in a commit */
 struct reiserfs_journal_commit {
   __u32 j_trans_id ;    /* must match j_trans_id from the desc block */
   __u32 j_len ;    /* ditto */
-  __u32 j_realblock[JOURNAL_TRANS_HALF] ; /* real locations for each block */
-  char j_digest[16] ;    /* md5 sum of all the blocks involved, including desc and commit. not used, kill it */
+  __u32 j_realblock[1] ;    /* real locations for each block */
 } ;
 
+#define get_commit_trans_id(c)    le32_to_cpu((c)->j_trans_id)
+#define get_commit_trans_len(c)   le32_to_cpu((c)->j_len)
+#define get_commit_mount_id(c)    le32_to_cpu((c)->j_mount_id)
+
+#define set_commit_trans_id(c,val)   do { (c)->j_trans_id = cpu_to_le32 (val); } while (0)
+#define set_commit_trans_len(c,val)  do { (c)->j_len = cpu_to_le32 (val); } while (0)
+
 /* this header block gets written whenever a transaction is considered fully flushed, and is more recent than the
 ** last fully flushed transaction.  fully flushed means all the log blocks and all the real blocks are on disk,
 ** and this transaction does not need to be replayed.
......
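As a quick sanity check of the journal_trans_half() arithmetic above: with the trimmed on-disk descriptor (three __u32 fields plus a one-element realblock array) and a 4096-byte block, the formula reproduces the old JOURNAL_TRANS_HALF constant of 1018. A standalone sketch, with a local struct mirroring that layout:

/* Sketch only: verify journal_trans_half(4096) == 1018 for the trimmed layout. */
#include <stdio.h>
#include <stdint.h>

struct journal_desc {
        uint32_t j_trans_id;
        uint32_t j_len;
        uint32_t j_mount_id;
        uint32_t j_realblock[1];
};

#define journal_trans_half(blocksize) \
        (((blocksize) - sizeof(struct journal_desc) + sizeof(uint32_t) - 12) / sizeof(uint32_t))

int main(void)
{
        /* (4096 - 16 + 4 - 12) / 4 = 1018 */
        printf("journal_trans_half(4096) = %zu\n", journal_trans_half(4096));
        return 0;
}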
@@ -253,6 +253,8 @@ struct reiserfs_journal {
   struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE] ; /* hash table for all the real buffer heads in all
                                                                            the transactions */
   struct list_head j_prealloc_list ;     /* list of inodes which have preallocated blocks */
+  unsigned long j_max_trans_size ;
+  unsigned long j_max_batch_size ;
 };
 
 #define JOURNAL_DESC_MAGIC "ReIsErLB" /* ick.  magic string to find desc blocks in the journal */
......
...@@ -38,8 +38,6 @@ ...@@ -38,8 +38,6 @@
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
static kmem_cache_t *task_struct_cachep;
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk); extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_semundo(struct task_struct *tsk); extern void exit_semundo(struct task_struct *tsk);
...@@ -74,7 +72,13 @@ int nr_processes(void) ...@@ -74,7 +72,13 @@ int nr_processes(void)
return total; return total;
} }
static void free_task_struct(struct task_struct *tsk) #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct() kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk))
static kmem_cache_t *task_struct_cachep;
#endif
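For illustration, a hedged sketch of what an architecture override could look like once __HAVE_ARCH_TASK_STRUCT_ALLOCATOR is honoured here: the arch header defines the symbol and supplies its own alloc_task_struct()/free_task_struct() pair, and the generic task_struct_cachep is then never built. The page order and the placement in a hypothetical asm header are assumptions, not taken from a real port.

/* hypothetical <asm/...> snippet, purely illustrative */
#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#define alloc_task_struct() \
	((struct task_struct *)__get_free_pages(GFP_KERNEL, 1))
#define free_task_struct(tsk) \
	free_pages((unsigned long)(tsk), 1)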
static void free_task(struct task_struct *tsk)
{ {
/* /*
* The task cache is effectively disabled right now. * The task cache is effectively disabled right now.
...@@ -84,14 +88,14 @@ static void free_task_struct(struct task_struct *tsk) ...@@ -84,14 +88,14 @@ static void free_task_struct(struct task_struct *tsk)
*/ */
if (tsk != current) { if (tsk != current) {
free_thread_info(tsk->thread_info); free_thread_info(tsk->thread_info);
kmem_cache_free(task_struct_cachep,tsk); free_task_struct(tsk);
} else { } else {
int cpu = get_cpu(); int cpu = get_cpu();
tsk = task_cache[cpu]; tsk = task_cache[cpu];
if (tsk) { if (tsk) {
free_thread_info(tsk->thread_info); free_thread_info(tsk->thread_info);
kmem_cache_free(task_struct_cachep,tsk); free_task_struct(tsk);
} }
task_cache[cpu] = current; task_cache[cpu] = current;
put_cpu(); put_cpu();
...@@ -106,7 +110,7 @@ void __put_task_struct(struct task_struct *tsk) ...@@ -106,7 +110,7 @@ void __put_task_struct(struct task_struct *tsk)
security_task_free(tsk); security_task_free(tsk);
free_uid(tsk->user); free_uid(tsk->user);
free_task_struct(tsk); free_task(tsk);
} }
void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait) void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
...@@ -186,6 +190,7 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync) ...@@ -186,6 +190,7 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync)
void __init fork_init(unsigned long mempages) void __init fork_init(unsigned long mempages)
{ {
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
/* create a slab on which task_structs can be allocated */ /* create a slab on which task_structs can be allocated */
task_struct_cachep = task_struct_cachep =
kmem_cache_create("task_struct", kmem_cache_create("task_struct",
...@@ -193,6 +198,7 @@ void __init fork_init(unsigned long mempages) ...@@ -193,6 +198,7 @@ void __init fork_init(unsigned long mempages)
SLAB_MUST_HWCACHE_ALIGN, NULL, NULL); SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
if (!task_struct_cachep) if (!task_struct_cachep)
panic("fork_init(): cannot create task_struct SLAB cache"); panic("fork_init(): cannot create task_struct SLAB cache");
#endif
/* /*
* The default maximum number of threads is set to a safe * The default maximum number of threads is set to a safe
...@@ -222,13 +228,13 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) ...@@ -222,13 +228,13 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
task_cache[cpu] = NULL; task_cache[cpu] = NULL;
put_cpu(); put_cpu();
if (!tsk) { if (!tsk) {
ti = alloc_thread_info(); tsk = alloc_task_struct();
if (!ti) if (!tsk)
return NULL; return NULL;
tsk = kmem_cache_alloc(task_struct_cachep, GFP_KERNEL); ti = alloc_thread_info(tsk);
if (!tsk) { if (!ti) {
free_thread_info(ti); free_task_struct(tsk);
return NULL; return NULL;
} }
} else } else
...@@ -1041,7 +1047,7 @@ struct task_struct *copy_process(unsigned long clone_flags, ...@@ -1041,7 +1047,7 @@ struct task_struct *copy_process(unsigned long clone_flags,
atomic_dec(&p->user->processes); atomic_dec(&p->user->processes);
free_uid(p->user); free_uid(p->user);
bad_fork_free: bad_fork_free:
free_task_struct(p); free_task(p);
goto fork_out; goto fork_out;
} }
......
...@@ -1226,47 +1226,36 @@ void __init init_timers(void) ...@@ -1226,47 +1226,36 @@ void __init init_timers(void)
} }
#ifdef CONFIG_TIME_INTERPOLATION #ifdef CONFIG_TIME_INTERPOLATION
volatile unsigned long last_nsec_offset; volatile unsigned long last_nsec_offset;
struct time_interpolator *time_interpolator;
#ifndef __HAVE_ARCH_CMPXCHG #ifndef __HAVE_ARCH_CMPXCHG
spinlock_t last_nsec_offset_lock = SPIN_LOCK_UNLOCKED; spinlock_t last_nsec_offset_lock = SPIN_LOCK_UNLOCKED;
#endif #endif
static struct { struct time_interpolator *time_interpolator;
spinlock_t lock; /* lock protecting list */ struct time_interpolator *time_interpolator_list;
struct time_interpolator *list; /* list of registered interpolators */ spinlock_t time_interpolator_lock = SPIN_LOCK_UNLOCKED;
} ti_global = {
.lock = SPIN_LOCK_UNLOCKED
};
static inline int static inline int
is_better_time_interpolator(struct time_interpolator *new) is_better_time_interpolator(struct time_interpolator *new)
{ {
if (!time_interpolator) if (!time_interpolator)
return 1; return 1;
return new->frequency > 2*time_interpolator->frequency return new->frequency > 2*time_interpolator->frequency ||
|| (unsigned long) new->drift < (unsigned long) time_interpolator->drift; (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
} }
void void
register_time_interpolator(struct time_interpolator *ti) register_time_interpolator(struct time_interpolator *ti)
{ {
spin_lock(&ti_global.lock); spin_lock(&time_interpolator_lock);
{ write_seqlock_irq(&xtime_lock);
write_seqlock_irq(&xtime_lock); if (is_better_time_interpolator(ti))
{ time_interpolator = ti;
if (is_better_time_interpolator(ti)) write_sequnlock_irq(&xtime_lock);
time_interpolator = ti;
} ti->next = time_interpolator_list;
write_sequnlock_irq(&xtime_lock); time_interpolator_list = ti;
spin_unlock(&time_interpolator_lock);
ti->next = ti_global.list;
ti_global.list = ti;
}
spin_unlock(&ti_global.lock);
} }
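A hedged sketch of a caller of this interface: a platform clock driver fills in the fields consulted by is_better_time_interpolator() and registers itself. The object name, the frequency/drift values and the init wrapper are assumptions for illustration only.

/* illustrative only: register a hypothetical platform time source */
static struct time_interpolator my_interpolator = {
	.frequency = 100000000,	/* assumed 100 MHz counter */
	.drift = 100,		/* assumed drift bound */
};

static int __init my_interpolator_init(void)
{
	register_time_interpolator(&my_interpolator);
	return 0;
}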
void void
...@@ -1274,30 +1263,26 @@ unregister_time_interpolator(struct time_interpolator *ti) ...@@ -1274,30 +1263,26 @@ unregister_time_interpolator(struct time_interpolator *ti)
{ {
struct time_interpolator *curr, **prev; struct time_interpolator *curr, **prev;
spin_lock(&ti_global.lock); spin_lock(&time_interpolator_lock);
{ prev = &time_interpolator_list;
prev = &ti_global.list; for (curr = *prev; curr; curr = curr->next) {
for (curr = *prev; curr; curr = curr->next) { if (curr == ti) {
if (curr == ti) { *prev = curr->next;
*prev = curr->next; break;
break;
}
prev = &curr->next;
}
write_seqlock_irq(&xtime_lock);
{
if (ti == time_interpolator) {
/* we lost the best time-interpolator: */
time_interpolator = NULL;
/* find the next-best interpolator */
for (curr = ti_global.list; curr; curr = curr->next)
if (is_better_time_interpolator(curr))
time_interpolator = curr;
}
} }
write_sequnlock_irq(&xtime_lock); prev = &curr->next;
} }
spin_unlock(&ti_global.lock);
}
write_seqlock_irq(&xtime_lock);
if (ti == time_interpolator) {
/* we lost the best time-interpolator: */
time_interpolator = NULL;
/* find the next-best interpolator */
for (curr = time_interpolator_list; curr; curr = curr->next)
if (is_better_time_interpolator(curr))
time_interpolator = curr;
}
write_sequnlock_irq(&xtime_lock);
spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */ #endif /* CONFIG_TIME_INTERPOLATION */
...@@ -81,8 +81,6 @@ void __remove_from_page_cache(struct page *page) ...@@ -81,8 +81,6 @@ void __remove_from_page_cache(struct page *page)
{ {
struct address_space *mapping = page->mapping; struct address_space *mapping = page->mapping;
BUG_ON(PageDirty(page) && !PageSwapCache(page));
radix_tree_delete(&mapping->page_tree, page->index); radix_tree_delete(&mapping->page_tree, page->index);
list_del(&page->list); list_del(&page->list);
page->mapping = NULL; page->mapping = NULL;
...@@ -1410,6 +1408,11 @@ void remove_suid(struct dentry *dentry) ...@@ -1410,6 +1408,11 @@ void remove_suid(struct dentry *dentry)
} }
} }
/*
* Copy as much as we can into the page and return the number of bytes which
 * were successfully copied. If a fault is encountered then clear the page
* out to (offset+bytes) and return the number of bytes which were copied.
*/
static inline size_t static inline size_t
filemap_copy_from_user(struct page *page, unsigned long offset, filemap_copy_from_user(struct page *page, unsigned long offset,
const char __user *buf, unsigned bytes) const char __user *buf, unsigned bytes)
...@@ -1427,30 +1430,42 @@ filemap_copy_from_user(struct page *page, unsigned long offset, ...@@ -1427,30 +1430,42 @@ filemap_copy_from_user(struct page *page, unsigned long offset,
left = __copy_from_user(kaddr + offset, buf, bytes); left = __copy_from_user(kaddr + offset, buf, bytes);
kunmap(page); kunmap(page);
} }
return left ? 0 : bytes; return bytes - left;
} }
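To make the new return convention concrete: __copy_from_user() returns the number of bytes it could not copy, so bytes - left is the count actually copied, instead of the previous all-or-nothing 0/bytes result. A hedged sketch of how a caller can react to a short copy (the variable names are assumptions):

size_t copied = filemap_copy_from_user(page, offset, buf, bytes);
if (copied < bytes) {
	/* a fault hit part-way through the user buffer: only the
	 * first 'copied' bytes of the page range are valid, and the
	 * caller commits just that much (or retries / returns -EFAULT) */
}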
static size_t static size_t
__filemap_copy_from_user_iovec(char *vaddr, __filemap_copy_from_user_iovec(char *vaddr,
const struct iovec *iov, size_t base, size_t bytes) const struct iovec *iov, size_t base, size_t bytes)
{ {
size_t copied = 0; size_t copied = 0, left = 0;
while (bytes) { while (bytes) {
char __user *buf = iov->iov_base + base; char __user *buf = iov->iov_base + base;
int copy = min(bytes, iov->iov_len - base); int copy = min(bytes, iov->iov_len - base);
base = 0; base = 0;
if (__copy_from_user(vaddr, buf, copy)) left = __copy_from_user(vaddr, buf, copy);
break;
copied += copy; copied += copy;
bytes -= copy; bytes -= copy;
vaddr += copy; vaddr += copy;
iov++; iov++;
if (unlikely(left)) {
/* zero the rest of the target like __copy_from_user */
if (bytes)
memset(vaddr, 0, bytes);
break;
}
} }
return copied; return copied - left;
} }
/*
 * This has the same side effects and return value as filemap_copy_from_user().
* The difference is that on a fault we need to memset the remainder of the
* page (out to offset+bytes), to emulate filemap_copy_from_user()'s
* single-segment behaviour.
*/
static inline size_t static inline size_t
filemap_copy_from_user_iovec(struct page *page, unsigned long offset, filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
const struct iovec *iov, size_t base, size_t bytes) const struct iovec *iov, size_t base, size_t bytes)
...@@ -1718,8 +1733,7 @@ generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, ...@@ -1718,8 +1733,7 @@ generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
copied = filemap_copy_from_user_iovec(page, offset, copied = filemap_copy_from_user_iovec(page, offset,
cur_iov, iov_base, bytes); cur_iov, iov_base, bytes);
flush_dcache_page(page); flush_dcache_page(page);
status = a_ops->commit_write(file, page, offset, status = a_ops->commit_write(file, page, offset, offset+bytes);
offset + copied);
if (likely(copied > 0)) { if (likely(copied > 0)) {
if (!status) if (!status)
status = copied; status = copied;
......
...@@ -286,10 +286,14 @@ static int conf_choice(struct menu *menu) ...@@ -286,10 +286,14 @@ static int conf_choice(struct menu *menu)
break; break;
} }
} else { } else {
sym->user = sym->curr; switch (sym_get_tristate_value(sym)) {
if (sym->curr.tri == mod) { case no:
return 1;
case mod:
printf("%*s%s\n", indent - 1, "", menu_get_prompt(menu)); printf("%*s%s\n", indent - 1, "", menu_get_prompt(menu));
return 0; return 0;
case yes:
break;
} }
} }
......
...@@ -243,7 +243,8 @@ int conf_read(const char *name) ...@@ -243,7 +243,8 @@ int conf_read(const char *name)
prop = sym_get_choice_prop(sym); prop = sym_get_choice_prop(sym);
sym->flags &= ~SYMBOL_NEW; sym->flags &= ~SYMBOL_NEW;
for (e = prop->expr; e; e = e->left.expr) for (e = prop->expr; e; e = e->left.expr)
sym->flags |= e->right.sym->flags & SYMBOL_NEW; if (e->right.sym->visible != no)
sym->flags |= e->right.sym->flags & SYMBOL_NEW;
} }
sym_change_count = 1; sym_change_count = 1;
......
...@@ -221,11 +221,18 @@ void menu_finalize(struct menu *parent) ...@@ -221,11 +221,18 @@ void menu_finalize(struct menu *parent)
for (menu = parent->list; menu; menu = menu->next) { for (menu = parent->list; menu; menu = menu->next) {
if (sym && sym_is_choice(sym) && menu->sym) { if (sym && sym_is_choice(sym) && menu->sym) {
menu->sym->flags |= SYMBOL_CHOICEVAL; menu->sym->flags |= SYMBOL_CHOICEVAL;
if (!menu->prompt)
fprintf(stderr, "%s:%d:warning: choice value must have a prompt\n",
menu->file->name, menu->lineno);
for (prop = menu->sym->prop; prop; prop = prop->next) { for (prop = menu->sym->prop; prop; prop = prop->next) {
if (prop->type != P_DEFAULT) if (prop->type == P_PROMPT && prop->menu != menu) {
continue; fprintf(stderr, "%s:%d:warning: choice values currently only support a single prompt\n",
fprintf(stderr, "%s:%d:warning: defaults for choice values not supported\n", prop->file->name, prop->lineno);
prop->file->name, prop->lineno);
}
if (prop->type == P_DEFAULT)
fprintf(stderr, "%s:%d:warning: defaults for choice values not supported\n",
prop->file->name, prop->lineno);
} }
current_entry = menu; current_entry = menu;
menu_set_type(sym->type); menu_set_type(sym->type);
...@@ -311,14 +318,6 @@ bool menu_is_visible(struct menu *menu) ...@@ -311,14 +318,6 @@ bool menu_is_visible(struct menu *menu)
} else } else
visible = menu->prompt->visible.tri = expr_calc_value(menu->prompt->visible.expr); visible = menu->prompt->visible.tri = expr_calc_value(menu->prompt->visible.expr);
if (sym && sym_is_choice(sym)) {
for (child = menu->list; child; child = child->next)
if (menu_is_visible(child))
break;
if (!child)
return false;
}
if (visible != no) if (visible != no)
return true; return true;
if (!sym || sym_get_tristate_value(menu->sym) == no) if (!sym || sym_get_tristate_value(menu->sym) == no)
......
...@@ -201,6 +201,9 @@ static struct symbol *sym_calc_choice(struct symbol *sym) ...@@ -201,6 +201,9 @@ static struct symbol *sym_calc_choice(struct symbol *sym)
if (def_sym->visible != no) if (def_sym->visible != no)
return def_sym; return def_sym;
} }
/* no choice? reset tristate value */
sym->curr.tri = no;
return NULL; return NULL;
} }
......
...@@ -580,15 +580,17 @@ static void set_ac3(struct cm_state *s, unsigned rate) ...@@ -580,15 +580,17 @@ static void set_ac3(struct cm_state *s, unsigned rate)
spin_unlock_irqrestore(&s->lock, flags); spin_unlock_irqrestore(&s->lock, flags);
} }
static void trans_ac3(struct cm_state *s, void *dest, const char *source, int size) static int trans_ac3(struct cm_state *s, void *dest, const char *source, int size)
{ {
int i = size / 2; int i = size / 2;
int err;
unsigned long data; unsigned long data;
unsigned long *dst = (unsigned long *) dest; unsigned long *dst = (unsigned long *) dest;
unsigned short *src = (unsigned short *)source; unsigned short *src = (unsigned short *)source;
do { do {
data = (unsigned long) *src++; if ((err = __get_user(data, src++)))
return err;
data <<= 12; // ok for 16-bit data data <<= 12; // ok for 16-bit data
if (s->spdif_counter == 2 || s->spdif_counter == 3) if (s->spdif_counter == 2 || s->spdif_counter == 3)
data |= 0x40000000; // indicate AC-3 raw data data |= 0x40000000; // indicate AC-3 raw data
...@@ -605,6 +607,8 @@ static void trans_ac3(struct cm_state *s, void *dest, const char *source, int si ...@@ -605,6 +607,8 @@ static void trans_ac3(struct cm_state *s, void *dest, const char *source, int si
if (s->spdif_counter == 384) if (s->spdif_counter == 384)
s->spdif_counter = 0; s->spdif_counter = 0;
} while (--i); } while (--i);
return 0;
} }
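The __get_user() calls above rely on the user buffer having been range-checked once by the write path; a hedged sketch of the usual pattern in drivers of this era (not the actual cm_write() code) is:

/* validate the whole user buffer once ... */
if (!access_ok(VERIFY_READ, buffer, count))
	return -EFAULT;

/* ... then the cheaper __get_user() is safe inside the copy loop,
 * and any fault it still reports is propagated as the return value */
err = trans_ac3(s, s->dma_dac.rawbuf + swptr, buffer, cnt);
if (err)
	return err;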
static void set_adc_rate_unlocked(struct cm_state *s, unsigned rate) static void set_adc_rate_unlocked(struct cm_state *s, unsigned rate)
...@@ -1655,13 +1659,16 @@ static ssize_t cm_write(struct file *file, const char *buffer, size_t count, lof ...@@ -1655,13 +1659,16 @@ static ssize_t cm_write(struct file *file, const char *buffer, size_t count, lof
continue; continue;
} }
if (s->status & DO_AC3_SW) { if (s->status & DO_AC3_SW) {
int err;
// clip exceeded data, caught by 033 and 037 // clip exceeded data, caught by 033 and 037
if (swptr + 2 * cnt > s->dma_dac.dmasize) if (swptr + 2 * cnt > s->dma_dac.dmasize)
cnt = (s->dma_dac.dmasize - swptr) / 2; cnt = (s->dma_dac.dmasize - swptr) / 2;
trans_ac3(s, s->dma_dac.rawbuf + swptr, buffer, cnt); if ((err = trans_ac3(s, s->dma_dac.rawbuf + swptr, buffer, cnt)))
return err;
swptr = (swptr + 2 * cnt) % s->dma_dac.dmasize; swptr = (swptr + 2 * cnt) % s->dma_dac.dmasize;
} else if (s->status & DO_DUAL_DAC) { } else if (s->status & DO_DUAL_DAC) {
int i; int i, err;
unsigned long *src, *dst0, *dst1; unsigned long *src, *dst0, *dst1;
src = (unsigned long *) buffer; src = (unsigned long *) buffer;
...@@ -1669,8 +1676,10 @@ static ssize_t cm_write(struct file *file, const char *buffer, size_t count, lof ...@@ -1669,8 +1676,10 @@ static ssize_t cm_write(struct file *file, const char *buffer, size_t count, lof
dst1 = (unsigned long *) (s->dma_adc.rawbuf + swptr); dst1 = (unsigned long *) (s->dma_adc.rawbuf + swptr);
// copy left/right sample at one time // copy left/right sample at one time
for (i = 0; i <= cnt / 4; i++) { for (i = 0; i <= cnt / 4; i++) {
*dst0++ = *src++; if ((err = __get_user(*dst0++, src++)))
*dst1++ = *src++; return err;
if ((err = __get_user(*dst1++, src++)))
return err;
} }
swptr = (swptr + cnt) % s->dma_dac.dmasize; swptr = (swptr + cnt) % s->dma_dac.dmasize;
} else { } else {
......