Commit 7962ad19 authored by Matthew Wilcox, committed by Linus Torvalds

[PATCH] fs/locks.c: Only yield once for flocks

This patch removes the annoying and confusing `wait' argument from
many places.  The only change in behaviour is that we now yield once
when unblocking other BSD-style flocks instead of once for each lock.

This slightly improves the semantics for userspace.  Before, when we
had two tasks waiting on a lock, the first one would receive the lock.
Now, the one with the highest priority receives the lock.
parent 07900f7a
......@@ -438,25 +438,16 @@ void locks_notify_blocked(struct file_lock *waiter)
 * Wake up processes blocked waiting for blocker.  Each waiter is
 * removed from the block list here ourselves, because by the time it
 * wakes up the blocker may no longer exist.
 */
static void locks_wake_up_blocks(struct file_lock *blocker, unsigned int wait)
static void locks_wake_up_blocks(struct file_lock *blocker)
{
while (!list_empty(&blocker->fl_block)) {
struct file_lock *waiter = list_entry(blocker->fl_block.next, struct file_lock, fl_block);
if (wait) {
locks_notify_blocked(waiter);
/* Let the blocked process remove waiter from the
* block list when it gets scheduled.
*/
yield();
} else {
/* Remove waiter from the block list, because by the
* time it wakes up blocker won't exist any more.
*/
locks_delete_block(waiter);
locks_notify_blocked(waiter);
}
}
}
/* Insert file lock fl into an inode's lock list at the position indicated
......@@ -492,7 +483,7 @@ static inline void _unhash_lock(struct file_lock **thisfl_p)
* notify the FS that the lock has been cleared and
* finally free the lock.
*/
static inline void _delete_lock(struct file_lock *fl, unsigned int wait)
static inline void _delete_lock(struct file_lock *fl)
{
fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
if (fl->fl_fasync != NULL){
......@@ -503,19 +494,19 @@ static inline void _delete_lock(struct file_lock *fl, unsigned int wait)
if (fl->fl_remove)
fl->fl_remove(fl);
locks_wake_up_blocks(fl, wait);
locks_wake_up_blocks(fl);
locks_free_lock(fl);
}
/*
* Delete a lock and then free it.
*/
static void locks_delete_lock(struct file_lock **thisfl_p, unsigned int wait)
static void locks_delete_lock(struct file_lock **thisfl_p)
{
struct file_lock *fl = *thisfl_p;
_unhash_lock(thisfl_p);
_delete_lock(fl, wait);
_delete_lock(fl);
}
/*
......@@ -534,7 +525,7 @@ static inline void locks_unlock_delete(struct file_lock **thisfl_p)
fl->fl_type = F_UNLCK;
lock(fl->fl_file, F_SETLK, fl);
}
_delete_lock(fl, 0);
_delete_lock(fl);
}
/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
......@@ -806,13 +797,15 @@ static int flock_lock_file(struct file *filp, unsigned int lock_type,
*/
if (change) {
/* N.B. What if the wait argument is false? */
locks_delete_lock(before, !unlock);
locks_delete_lock(before);
if (!unlock) {
yield();
/*
* If we waited, another lock may have been added ...
*/
if (!unlock)
goto search;
}
}
if (unlock)
goto out;
......@@ -944,7 +937,7 @@ int posix_lock_file(struct file *filp, struct file_lock *caller,
else
caller->fl_end = fl->fl_end;
if (added) {
locks_delete_lock(before, 0);
locks_delete_lock(before);
continue;
}
caller = fl;
......@@ -974,7 +967,7 @@ int posix_lock_file(struct file *filp, struct file_lock *caller,
* one (This may happen several times).
*/
if (added) {
locks_delete_lock(before, 0);
locks_delete_lock(before);
continue;
}
/* Replace the old lock with the new one.
......@@ -982,7 +975,7 @@ int posix_lock_file(struct file *filp, struct file_lock *caller,
* as the change in lock type might satisfy
* their needs.
*/
locks_wake_up_blocks(fl, 0); /* This cannot schedule()! */
locks_wake_up_blocks(fl);
fl->fl_start = caller->fl_start;
fl->fl_end = caller->fl_end;
fl->fl_type = caller->fl_type;
......@@ -1016,11 +1009,11 @@ int posix_lock_file(struct file *filp, struct file_lock *caller,
locks_insert_lock(before, left);
}
right->fl_start = caller->fl_end + 1;
locks_wake_up_blocks(right, 0);
locks_wake_up_blocks(right);
}
if (left) {
left->fl_end = caller->fl_start - 1;
locks_wake_up_blocks(left, 0);
locks_wake_up_blocks(left);
}
out:
unlock_kernel();
......@@ -1129,7 +1122,7 @@ int __get_lease(struct inode *inode, unsigned int mode)
error = locks_block_on_timeout(flock, new_fl, error);
if (error == 0) {
/* We timed out. Unilaterally break the lease. */
locks_delete_lock(&inode->i_flock, 0);
locks_delete_lock(&inode->i_flock);
printk(KERN_WARNING "lease timed out\n");
} else if (error > 0) {
flock = inode->i_flock;
......@@ -1192,14 +1185,14 @@ static int lease_modify(struct file_lock **before, int arg, int fd, struct file
if (error < 0)
goto out;
locks_wake_up_blocks(fl, 0);
locks_wake_up_blocks(fl);
if (arg == F_UNLCK) {
filp->f_owner.pid = 0;
filp->f_owner.uid = 0;
filp->f_owner.euid = 0;
filp->f_owner.signum = 0;
locks_delete_lock(before, 0);
locks_delete_lock(before);
fasync_helper(fd, filp, 0, &fl->fl_fasync);
}
......@@ -1675,7 +1668,7 @@ void locks_remove_flock(struct file *filp)
while ((fl = *before) != NULL) {
if ((IS_FLOCK(fl) || IS_LEASE(fl)) && (fl->fl_file == filp)) {
locks_delete_lock(before, 0);
locks_delete_lock(before);
continue;
}
before = &fl->fl_next;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment