Commit 2c336117 authored by James Simmons

Merge maxwell.earthlink.net:/usr/src/linus-2.5

into maxwell.earthlink.net:/usr/src/fbdev-2.5
parents 39c6e0a0 23850f8d
@@ -76,7 +76,7 @@ static int __init aio_setup(void)
aio_wq = create_workqueue("aio");
printk(KERN_NOTICE "aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
return 0;
}
@@ -1193,7 +1193,7 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb *iocb,
if (NULL != cancel) {
struct io_event tmp;
printk("calling cancel\n");
pr_debug("calling cancel\n");
memset(&tmp, 0, sizeof(tmp));
tmp.obj = (u64)(unsigned long)kiocb->ki_user_obj;
tmp.data = kiocb->ki_user_data;
......
@@ -143,15 +143,9 @@ static LIST_HEAD(blocked_list);
static kmem_cache_t *filelock_cache;
/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(int account)
static struct file_lock *locks_alloc_lock(void)
{
struct file_lock *fl;
if (account && current->locks >= current->rlim[RLIMIT_LOCKS].rlim_cur)
return NULL;
fl = kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
if (fl)
current->locks++;
return fl;
return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
}
/* Free a lock which is not in use. */
@@ -161,7 +155,6 @@ static inline void locks_free_lock(struct file_lock *fl)
BUG();
return;
}
current->locks--;
if (waitqueue_active(&fl->fl_wait))
panic("Attempting to free lock with active wait queue");
@@ -248,7 +241,7 @@ static int flock_make_lock(struct file *filp,
if (type < 0)
return type;
fl = locks_alloc_lock(1);
fl = locks_alloc_lock();
if (fl == NULL)
return -ENOMEM;
@@ -382,7 +375,7 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
/* Allocate a file_lock initialised to this type of lease */
static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
{
struct file_lock *fl = locks_alloc_lock(1);
struct file_lock *fl = locks_alloc_lock();
if (fl == NULL)
return -ENOMEM;
@@ -427,13 +420,22 @@ posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
/* Remove waiter from blocker's block list.
* When blocker ends up pointing to itself then the list is empty.
*/
static void locks_delete_block(struct file_lock *waiter)
static inline void __locks_delete_block(struct file_lock *waiter)
{
list_del_init(&waiter->fl_block);
list_del_init(&waiter->fl_link);
waiter->fl_next = NULL;
}
/* Remove waiter from blocker's block list, taking the big kernel lock
 * so callers need not hold it.
 */
static void locks_delete_block(struct file_lock *waiter)
{
lock_kernel();
__locks_delete_block(waiter);
unlock_kernel();
}
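
The split above follows the usual kernel convention: the double-underscore helper assumes the caller already holds the relevant lock (here the big kernel lock), while the plain-named wrapper takes and drops it around a single call. A minimal user-space sketch of that convention, using an illustrative pthread mutex and counter rather than the lock list:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/* __bump() requires the caller to already hold "lock"; paths doing other
 * work under the same lock call this directly, the way locks_insert_block()
 * calls __locks_delete_block() above.
 */
static void __bump(void)
{
	counter++;
}

/* bump() is the standalone entry point: it takes and releases the lock
 * itself, the way the new locks_delete_block() wraps __locks_delete_block().
 */
static void bump(void)
{
	pthread_mutex_lock(&lock);
	__bump();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	bump();
	bump();
	printf("counter = %d\n", counter);	/* prints: counter = 2 */
	return 0;
}
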
/* Insert waiter into blocker's block list.
* We use a circular list so that processes can be easily woken up in
* the order they blocked. The documentation doesn't require this but
@@ -446,7 +448,7 @@ static void locks_insert_block(struct file_lock *blocker,
printk(KERN_ERR "locks_insert_block: removing duplicated lock "
"(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid,
waiter->fl_start, waiter->fl_end, waiter->fl_type);
locks_delete_block(waiter);
__locks_delete_block(waiter);
}
list_add_tail(&waiter->fl_block, &blocker->fl_block);
waiter->fl_next = blocker;
@@ -462,7 +464,7 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
while (!list_empty(&blocker->fl_block)) {
struct file_lock *waiter = list_entry(blocker->fl_block.next,
struct file_lock, fl_block);
locks_delete_block(waiter);
__locks_delete_block(waiter);
if (waiter->fl_notify)
waiter->fl_notify(waiter);
else
@@ -589,7 +591,7 @@ static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *w
int result;
locks_insert_block(blocker, waiter);
result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
locks_delete_block(waiter);
__locks_delete_block(waiter);
return result;
}
@@ -726,8 +728,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
* We may need two file_lock structures for this operation,
* so we get them in advance to avoid races.
*/
new_fl = locks_alloc_lock(0);
new_fl2 = locks_alloc_lock(0);
new_fl = locks_alloc_lock();
new_fl2 = locks_alloc_lock();
lock_kernel();
if (request->fl_type != F_UNLCK) {
@@ -977,9 +979,7 @@ int locks_mandatory_area(int read_write, struct inode *inode,
continue;
}
lock_kernel();
locks_delete_block(&fl);
unlock_kernel();
break;
}
@@ -1332,9 +1332,7 @@ asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
if (!error)
continue;
lock_kernel();
locks_delete_block(lock);
unlock_kernel();
break;
}
@@ -1416,7 +1414,7 @@ int fcntl_getlk(struct file *filp, struct flock *l)
*/
int fcntl_setlk(struct file *filp, unsigned int cmd, struct flock *l)
{
struct file_lock *file_lock = locks_alloc_lock(0);
struct file_lock *file_lock = locks_alloc_lock();
struct flock flock;
struct inode *inode;
int error;
@@ -1489,9 +1487,7 @@ int fcntl_setlk(struct file *filp, unsigned int cmd, struct flock *l)
if (!error)
continue;
lock_kernel();
locks_delete_block(file_lock);
unlock_kernel();
break;
}
@@ -1556,7 +1552,7 @@ int fcntl_getlk64(struct file *filp, struct flock64 *l)
*/
int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 *l)
{
struct file_lock *file_lock = locks_alloc_lock(0);
struct file_lock *file_lock = locks_alloc_lock();
struct flock64 flock;
struct inode *inode;
int error;
@@ -1629,9 +1625,7 @@ int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 *l)
if (!error)
continue;
lock_kernel();
locks_delete_block(file_lock);
unlock_kernel();
break;
}
@@ -1648,14 +1642,15 @@ int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 *l)
*/
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
struct file_lock lock;
struct file_lock lock, **before;
/*
* If there are no locks held on this file, we don't need to call
* posix_lock_file(). Another process could be setting a lock on this
* file at the same time, but we wouldn't remove that lock anyway.
*/
if (!filp->f_dentry->d_inode->i_flock)
before = &filp->f_dentry->d_inode->i_flock;
if (*before == NULL)
return;
lock.fl_type = F_UNLCK;
@@ -1671,7 +1666,19 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
/* Ignore any error -- we must remove the locks anyway */
}
posix_lock_file(filp, &lock);
/* Can't use posix_lock_file here; we need to remove it no matter
* which pid we have.
*/
lock_kernel();
while (*before != NULL) {
struct file_lock *fl = *before;
if (IS_POSIX(fl) && (fl->fl_owner == owner)) {
locks_delete_lock(before);
continue;
}
before = &fl->fl_next;
}
unlock_kernel();
}
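
The replacement loop above walks the inode's lock list through a pointer-to-pointer ("before"), so any matching POSIX lock can be unlinked without keeping a separate previous-element pointer and without restarting the walk. A self-contained sketch of the same idiom on an illustrative singly linked list (struct node and its owner field are stand-ins, not kernel types):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int owner;
	struct node *next;
};

/* Unlink (and free) every node whose owner matches.  "before" always points
 * at the link that may need rewriting, so deletion is a single assignment.
 */
static void remove_owned(struct node **before, int owner)
{
	while (*before != NULL) {
		struct node *n = *before;

		if (n->owner == owner) {
			*before = n->next;	/* unlink in place */
			free(n);
			continue;		/* re-examine the same slot */
		}
		before = &n->next;		/* advance to the next link */
	}
}

static struct node *push(struct node *head, int owner)
{
	struct node *n = malloc(sizeof(*n));

	n->owner = owner;
	n->next = head;
	return n;
}

int main(void)
{
	struct node *head = NULL, *n;

	head = push(push(push(NULL, 1), 2), 1);
	remove_owned(&head, 1);			/* drops both owner==1 nodes */
	for (n = head; n; n = n->next)
		printf("owner %d\n", n->owner);	/* prints: owner 2 */
	return 0;
}
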
/*
@@ -1699,6 +1706,7 @@ void locks_remove_flock(struct file *filp)
lease_modify(before, F_UNLCK);
continue;
}
BUG();
}
before = &fl->fl_next;
}
@@ -1733,7 +1741,7 @@ posix_unblock_lock(struct file *filp, struct file_lock *waiter)
*/
lock_kernel();
if (waiter->fl_next) {
locks_delete_block(waiter);
__locks_delete_block(waiter);
unlock_kernel();
} else {
unlock_kernel();
@@ -1785,19 +1793,19 @@ static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx)
? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
: (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
}
#if WE_CAN_BREAK_LSLK_NOW
if (inode) {
#if WE_CAN_BREAK_LSLK_NOW
out += sprintf(out, "%d %s:%ld ", fl->fl_pid,
inode->i_sb->s_id, inode->i_ino);
#else
/* userspace relies on this representation of dev_t ;-( */
out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid,
MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
} else {
out += sprintf(out, "%d <none>:0 ", fl->fl_pid);
}
#else
/* kdevname is a broken interface. but we expose it to userspace */
out += sprintf(out, "%d %s:%ld ", fl->fl_pid,
inode ? kdevname(to_kdev_t(inode->i_sb->s_dev)) : "<none>",
inode ? inode->i_ino : 0);
#endif
if (IS_POSIX(fl)) {
if (fl->fl_end == OFFSET_MAX)
out += sprintf(out, "%Ld EOF\n", fl->fl_start);
......
@@ -302,10 +302,13 @@ static inline void enqueue_task(struct task_struct *p, prio_array_t *array)
*
* Both properties are important to certain workloads.
*/
static inline int effective_prio(task_t *p)
static int effective_prio(task_t *p)
{
int bonus, prio;
if (rt_task(p))
return p->prio;
bonus = MAX_USER_PRIO*PRIO_BONUS_RATIO*p->sleep_avg/MAX_SLEEP_AVG/100 -
MAX_USER_PRIO*PRIO_BONUS_RATIO/100/2;
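
The bonus formula above maps a task's sleep_avg linearly onto a priority adjustment: negative for tasks that rarely sleep, positive for highly interactive ones. A quick user-space rendering of the arithmetic, assuming 2.5-era style defaults of MAX_USER_PRIO == 40 and PRIO_BONUS_RATIO == 25 and an illustrative MAX_SLEEP_AVG of 1000 (the real values are defined in kernel/sched.c and may differ):

#include <stdio.h>

#define MAX_USER_PRIO    40	/* assumed default, see kernel/sched.c */
#define PRIO_BONUS_RATIO 25	/* assumed default, see kernel/sched.c */
#define MAX_SLEEP_AVG    1000	/* illustrative stand-in */

static int prio_bonus(int sleep_avg)
{
	/* Scales from -MAX_USER_PRIO*PRIO_BONUS_RATIO/100/2 (-5 here) at
	 * sleep_avg == 0 up to +5 at sleep_avg == MAX_SLEEP_AVG, with
	 * integer truncation along the way.
	 */
	return MAX_USER_PRIO * PRIO_BONUS_RATIO * sleep_avg / MAX_SLEEP_AVG / 100
		- MAX_USER_PRIO * PRIO_BONUS_RATIO / 100 / 2;
}

int main(void)
{
	int avg;

	for (avg = 0; avg <= MAX_SLEEP_AVG; avg += MAX_SLEEP_AVG / 4)
		printf("sleep_avg=%4d  bonus=%+d\n", avg, prio_bonus(avg));
	return 0;
}
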
@@ -318,10 +321,7 @@ static inline int effective_prio(task_t *p)
}
/*
* activate_task - move a task to the runqueue.
* Also update all the scheduling statistics stuff. (sleep average
* calculation, priority modifiers, etc.)
* __activate_task - move a task to the runqueue.
*/
static inline void __activate_task(task_t *p, runqueue_t *rq)
{
@@ -329,30 +329,57 @@ static inline void __activate_task(task_t *p, runqueue_t *rq)
nr_running_inc(rq);
}
static inline void activate_task(task_t *p, runqueue_t *rq)
/*
* activate_task - move a task to the runqueue and do priority recalculation
*
* Update all the scheduling statistics stuff. (sleep average
* calculation, priority modifiers, etc.)
*/
static inline int activate_task(task_t *p, runqueue_t *rq)
{
unsigned long sleep_time = jiffies - p->last_run;
int requeue_waker = 0;
if (sleep_time) {
int sleep_avg;
/*
* This code gives a bonus to interactive tasks.
*
* The boost works by updating the 'average sleep time'
* value here, based on ->last_run. The more time a task
* spends sleeping, the higher the average gets - and the
* higher the priority boost gets as well.
*/
sleep_avg = p->sleep_avg + sleep_time;
if (!rt_task(p) && sleep_time) {
/*
* This code gives a bonus to interactive tasks. We update
* an 'average sleep time' value here, based on
* ->last_run. The more time a task spends sleeping,
* the higher the average gets - and the higher the priority
* boost gets as well.
* 'Overflow' bonus ticks go to the waker as well, so the
* ticks are not lost. This has the effect of further
* boosting tasks that are related to maximum-interactive
* tasks.
*/
p->sleep_avg += sleep_time;
if (p->sleep_avg > MAX_SLEEP_AVG) {
int ticks = p->sleep_avg - MAX_SLEEP_AVG + current->sleep_avg;
p->sleep_avg = MAX_SLEEP_AVG;
if (ticks > MAX_SLEEP_AVG)
ticks = MAX_SLEEP_AVG;
if (!in_interrupt())
current->sleep_avg = ticks;
if (sleep_avg > MAX_SLEEP_AVG) {
if (!in_interrupt()) {
sleep_avg += current->sleep_avg - MAX_SLEEP_AVG;
if (sleep_avg > MAX_SLEEP_AVG)
sleep_avg = MAX_SLEEP_AVG;
if (current->sleep_avg != sleep_avg) {
current->sleep_avg = sleep_avg;
requeue_waker = 1;
}
}
sleep_avg = MAX_SLEEP_AVG;
}
if (p->sleep_avg != sleep_avg) {
p->sleep_avg = sleep_avg;
p->prio = effective_prio(p);
}
p->prio = effective_prio(p);
}
__activate_task(p, rq);
return requeue_waker;
}
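
In essence, the rewritten activate_task() above accumulates sleep time into sleep_avg, clamps it at MAX_SLEEP_AVG, credits any overflow to the task doing the waking, and returns a flag telling the caller to requeue itself. A rough user-space sketch of just that bookkeeping, with an illustrative MAX_SLEEP_AVG and without the in_interrupt()/rt_task() checks or the effective_prio() recalculation that the real code performs:

#include <stdio.h>

#define MAX_SLEEP_AVG 1000	/* illustrative stand-in for the kernel value */

struct task {
	unsigned long sleep_avg;
	unsigned long last_run;
};

/* Returns 1 when the waker's own sleep_avg changed and it should be
 * requeued at its recomputed priority (the requeue_waker case above).
 */
static int boost_on_wakeup(struct task *p, struct task *waker,
			   unsigned long now)
{
	unsigned long sleep_avg = p->sleep_avg + (now - p->last_run);
	int requeue_waker = 0;

	if (sleep_avg > MAX_SLEEP_AVG) {
		/* Credit the overflow ticks to the waker so they are not
		 * lost; this further boosts tasks that wake interactive
		 * tasks.
		 */
		unsigned long waker_avg =
			waker->sleep_avg + (sleep_avg - MAX_SLEEP_AVG);

		if (waker_avg > MAX_SLEEP_AVG)
			waker_avg = MAX_SLEEP_AVG;
		if (waker->sleep_avg != waker_avg) {
			waker->sleep_avg = waker_avg;
			requeue_waker = 1;
		}
		sleep_avg = MAX_SLEEP_AVG;
	}
	p->sleep_avg = sleep_avg;
	return requeue_waker;
}

int main(void)
{
	struct task sleeper = { 900, 0 };
	struct task waker   = { 400, 0 };

	/* sleeper slept 300 ticks: 200 overflow ticks go to the waker */
	int requeue = boost_on_wakeup(&sleeper, &waker, 300);

	printf("sleeper=%lu waker=%lu requeue=%d\n",
	       sleeper.sleep_avg, waker.sleep_avg, requeue);
	/* prints: sleeper=1000 waker=600 requeue=1 */
	return 0;
}
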
/*
@@ -465,8 +492,8 @@ void kick_if_running(task_t * p)
*/
static int try_to_wake_up(task_t * p, unsigned int state, int sync)
{
int success = 0, requeue_waker = 0;
unsigned long flags;
int success = 0;
long old_state;
runqueue_t *rq;
@@ -492,7 +519,7 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
if (sync)
__activate_task(p, rq);
else {
activate_task(p, rq);
requeue_waker = activate_task(p, rq);
if (p->prio < rq->curr->prio)
resched_task(rq->curr);
}
@@ -502,6 +529,21 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
}
task_rq_unlock(rq, &flags);
/*
* We have to do this outside the other spinlock, the two
* runqueues might be different:
*/
if (requeue_waker) {
prio_array_t *array;
rq = task_rq_lock(current, &flags);
array = current->array;
dequeue_task(current, array);
current->prio = effective_prio(current);
enqueue_task(current, array);
task_rq_unlock(rq, &flags);
}
return success;
}
@@ -527,16 +569,14 @@ void wake_up_forked_process(task_t * p)
runqueue_t *rq = task_rq_lock(current, &flags);
p->state = TASK_RUNNING;
if (!rt_task(p)) {
/*
* We decrease the sleep average of forking parents
* and children as well, to keep max-interactive tasks
* from forking tasks that are max-interactive.
*/
current->sleep_avg = current->sleep_avg * PARENT_PENALTY / 100;
p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100;
p->prio = effective_prio(p);
}
/*
* We decrease the sleep average of forking parents
* and children as well, to keep max-interactive tasks
* from forking tasks that are max-interactive.
*/
current->sleep_avg = current->sleep_avg * PARENT_PENALTY / 100;
p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100;
p->prio = effective_prio(p);
set_task_cpu(p, smp_processor_id());
if (unlikely(!current->array))
@@ -1147,6 +1187,16 @@ void scheduler_tick(int user_ticks, int sys_ticks)
return;
}
spin_lock(&rq->lock);
/*
* The task was running during this tick - update the
* time slice counter and the sleep average. Note: we
* do not update a thread's priority until it either
* goes to sleep or uses up its timeslice. This makes
* it possible for interactive tasks to use up their
* timeslices at their highest priority levels.
*/
if (p->sleep_avg)
p->sleep_avg--;
if (unlikely(rt_task(p))) {
/*
* RR tasks need a special form of timeslice management.
@@ -1163,16 +1213,6 @@ void scheduler_tick(int user_ticks, int sys_ticks)
}
goto out;
}
/*
* The task was running during this tick - update the
* time slice counter and the sleep average. Note: we
* do not update a thread's priority until it either
* goes to sleep or uses up its timeslice. This makes
* it possible for interactive tasks to use up their
* timeslices at their highest priority levels.
*/
if (p->sleep_avg)
p->sleep_avg--;
if (!--p->time_slice) {
dequeue_task(p, rq->active);
set_tsk_need_resched(p);
@@ -2341,7 +2381,7 @@ static int migration_thread(void * data)
set_task_cpu(p, cpu_dest);
if (p->array) {
deactivate_task(p, rq_src);
activate_task(p, rq_dest);
__activate_task(p, rq_dest);
if (p->prio < rq_dest->curr->prio)
resched_task(rq_dest->curr);
}
@@ -2468,6 +2508,7 @@ void __init sched_init(void)
rq->idle = current;
set_task_cpu(current, smp_processor_id());
wake_up_forked_process(current);
current->prio = MAX_PRIO;
init_timers();
......
@@ -92,10 +92,9 @@ asmlinkage void do_softirq()
mask &= ~pending;
goto restart;
}
__local_bh_enable();
if (pending)
wakeup_softirqd(cpu);
__local_bh_enable();
}
local_irq_restore(flags);
......