Commit 6ed499a9 authored by Neil Brown, committed by Linus Torvalds

[PATCH] Make sure sunrpc/cache doesn't confuse writers with readers.

When a sunrpc/cache channel is not open for reading, the
cache doesn't bother making and waiting for up-calls.

However, it doesn't currently distinguish between files opened for
reading (or read/write) and files opened only for writing, so an
open-for-write will look like a reader and will cause inappropriate
waiting.

This patch checks whether a file is open for reading, and only
registers it as a reader if it really is one.
parent 1f494fc0
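In file_operations terms, the fix keys reader registration off
filp->f_mode. Below is a minimal sketch of the pattern, not the patch
itself: the struct and function names are illustrative, and only
FMODE_READ and the open() method signature are taken from the kernel
API.

#include <linux/fs.h>
#include <linux/slab.h>

/* Illustrative per-reader state; stands in for struct cache_reader. */
struct example_reader {
	int offset;
};

static int example_open(struct inode *inode, struct file *filp)
{
	struct example_reader *rp = NULL;

	/* Opened for reading (or read/write): register as a reader. */
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;
		rp->offset = 0;
	}
	/*
	 * Write-only openers keep NULL private_data, so they are never
	 * counted as readers and nothing waits on them.
	 */
	filp->private_data = rp;
	return 0;
}

The matching release method must then tolerate a NULL private_data,
which is exactly what the cache_release half of the patch adds.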
@@ -738,19 +738,22 @@ cache_ioctl(struct inode *ino, struct file *filp,
 static int
 cache_open(struct inode *inode, struct file *filp)
 {
-	struct cache_reader *rp;
-	struct cache_detail *cd = PDE(inode)->data;
+	struct cache_reader *rp = NULL;
 
-	rp = kmalloc(sizeof(*rp), GFP_KERNEL);
-	if (!rp)
-		return -ENOMEM;
-	rp->page = NULL;
-	rp->offset = 0;
-	rp->q.reader = 1;
-	atomic_inc(&cd->readers);
-	spin_lock(&queue_lock);
-	list_add(&rp->q.list, &cd->queue);
-	spin_unlock(&queue_lock);
+	if (filp->f_mode & FMODE_READ) {
+		struct cache_detail *cd = PDE(inode)->data;
+
+		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
+		if (!rp)
+			return -ENOMEM;
+		rp->page = NULL;
+		rp->offset = 0;
+		rp->q.reader = 1;
+		atomic_inc(&cd->readers);
+		spin_lock(&queue_lock);
+		list_add(&rp->q.list, &cd->queue);
+		spin_unlock(&queue_lock);
+	}
 	filp->private_data = rp;
 	return 0;
 }
@@ -761,29 +764,31 @@ cache_release(struct inode *inode, struct file *filp)
 	struct cache_reader *rp = filp->private_data;
 	struct cache_detail *cd = PDE(inode)->data;
 
-	spin_lock(&queue_lock);
-	if (rp->offset) {
-		struct cache_queue *cq;
-		for (cq= &rp->q; &cq->list != &cd->queue;
-		     cq = list_entry(cq->list.next, struct cache_queue, list))
-			if (!cq->reader) {
-				container_of(cq, struct cache_request, q)
-					->readers--;
-				break;
-			}
-		rp->offset = 0;
-	}
-	list_del(&rp->q.list);
-	spin_unlock(&queue_lock);
+	if (rp) {
+		spin_lock(&queue_lock);
+		if (rp->offset) {
+			struct cache_queue *cq;
+			for (cq= &rp->q; &cq->list != &cd->queue;
+			     cq = list_entry(cq->list.next, struct cache_queue, list))
+				if (!cq->reader) {
+					container_of(cq, struct cache_request, q)
+						->readers--;
+					break;
+				}
+			rp->offset = 0;
+		}
+		list_del(&rp->q.list);
+		spin_unlock(&queue_lock);
 
-	if (rp->page)
-		kfree(rp->page);
+		if (rp->page)
+			kfree(rp->page);
 
-	filp->private_data = NULL;
-	kfree(rp);
+		filp->private_data = NULL;
+		kfree(rp);
 
-	cd->last_close = get_seconds();
-	atomic_dec(&cd->readers);
+		cd->last_close = get_seconds();
+		atomic_dec(&cd->readers);
+	}
 
 	return 0;
 }