Commit ede9de0c authored by Greg Kroah-Hartman

Merge bk://linuxusb@bkbits.net/linus-2.5

into kroah.com:/home/linux/linux/BK/gregkh-2.5
parents 136537a2 3d34f2f7
@@ -173,9 +173,10 @@ ENTRY(lcall27)
ENTRY(ret_from_fork)
+ # NOTE: this function takes a parameter but it's unused on x86.
+ pushl %eax
call schedule_tail
GET_THREAD_INFO(%ebp)
+ popl %eax
jmp syscall_exit
/*
......
@@ -1745,25 +1745,25 @@ static void mask_and_ack_level_ioapic_irq (unsigned int irq) { /* nothing */ }
*/
static struct hw_interrupt_type ioapic_edge_irq_type = {
"IO-APIC-edge",
startup_edge_ioapic_irq,
shutdown_edge_ioapic_irq,
enable_edge_ioapic_irq,
disable_edge_ioapic_irq,
ack_edge_ioapic_irq,
end_edge_ioapic_irq,
set_ioapic_affinity,
.typename = "IO-APIC-edge",
.startup = startup_edge_ioapic_irq,
.shutdown = shutdown_edge_ioapic_irq,
.enable = enable_edge_ioapic_irq,
.disable = disable_edge_ioapic_irq,
.ack = ack_edge_ioapic_irq,
.end = end_edge_ioapic_irq,
.set_affinity = set_ioapic_affinity,
};
static struct hw_interrupt_type ioapic_level_irq_type = {
"IO-APIC-level",
startup_level_ioapic_irq,
shutdown_level_ioapic_irq,
enable_level_ioapic_irq,
disable_level_ioapic_irq,
mask_and_ack_level_ioapic_irq,
end_level_ioapic_irq,
set_ioapic_affinity,
.typename = "IO-APIC-level",
.startup = startup_level_ioapic_irq,
.shutdown = shutdown_level_ioapic_irq,
.enable = enable_level_ioapic_irq,
.disable = disable_level_ioapic_irq,
.ack = mask_and_ack_level_ioapic_irq,
.end = end_level_ioapic_irq,
.set_affinity = set_ioapic_affinity,
};
static inline void init_IO_APIC_traps(void)
@@ -1821,13 +1821,13 @@ static void ack_lapic_irq (unsigned int irq)
static void end_lapic_irq (unsigned int i) { /* nothing */ }
static struct hw_interrupt_type lapic_irq_type = {
"local-APIC-edge",
NULL, /* startup_irq() not used for IRQ0 */
NULL, /* shutdown_irq() not used for IRQ0 */
enable_lapic_irq,
disable_lapic_irq,
ack_lapic_irq,
end_lapic_irq
.typename = "local-APIC-edge",
.startup = NULL, /* startup_irq() not used for IRQ0 */
.shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
.enable = enable_lapic_irq,
.disable = disable_lapic_irq,
.ack = ack_lapic_irq,
.end = end_lapic_irq
};
static void setup_nmi (void)
......
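The io_apic.c hunks above replace positional struct initializers with C99 designated initializers, so each handler is bound to a named member of struct hw_interrupt_type rather than to its position in the list. A minimal standalone sketch of the pattern (the struct and handlers below are invented for illustration, not the kernel's types):

#include <stdio.h>

struct irq_type {
	const char *typename;
	void (*startup)(unsigned int irq);
	void (*ack)(unsigned int irq);
};

static void demo_startup(unsigned int irq) { printf("startup %u\n", irq); }
static void demo_ack(unsigned int irq)     { printf("ack %u\n", irq); }

/* Members are named explicitly, so the initializer no longer breaks
 * if fields are reordered, and any omitted member is zero-initialized. */
static struct irq_type demo_irq_type = {
	.typename = "demo-edge",
	.startup  = demo_startup,
	.ack      = demo_ack,
};

int main(void)
{
	demo_irq_type.startup(0);
	demo_irq_type.ack(0);
	return 0;
}

The lapic_irq_type hunk shows the other payoff: NULL members can be spelled out by name without having to count positions.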
@@ -423,8 +423,12 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
* so the performance issues may eventually be a valid point.
* More important, however, is the fact that this allows us much
* more flexibility.
+ *
+ * The return value (in %eax) will be the "prev" task after
+ * the task-switch, and shows up in ret_from_fork in entry.S,
+ * for example.
*/
- void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
@@ -495,6 +499,7 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
tss->bitmap = INVALID_IO_BITMAP_OFFSET;
}
+ return prev_p;
}
asmlinkage int sys_fork(struct pt_regs regs)
......
@@ -147,7 +147,7 @@ static unsigned long get_exec_dcookie(struct mm_struct * mm)
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!vma->vm_file)
continue;
- if (!vma->vm_flags & VM_EXECUTABLE)
+ if (!(vma->vm_flags & VM_EXECUTABLE))
continue;
cookie = fast_get_dcookie(vma->vm_file->f_dentry,
vma->vm_file->f_vfsmnt);
......
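The oprofile hunk above fixes a classic C precedence bug: `!` binds tighter than `&`, so the old test negated vm_flags before masking it. A standalone sketch of the difference (the flag value is made up for illustration):

#include <stdio.h>

#define VM_EXECUTABLE 0x00001000UL	/* illustrative value */

int main(void)
{
	unsigned long vm_flags = 0x1UL;	/* some flag set, but not VM_EXECUTABLE */

	/* Old form: !vm_flags evaluates to 0 whenever any flag is set,
	 * and 0 & VM_EXECUTABLE is always 0, so the vma was never skipped. */
	if (!vm_flags & VM_EXECUTABLE)
		printf("old test: skip\n");	/* never reached */

	/* Fixed form: mask first, then negate. */
	if (!(vm_flags & VM_EXECUTABLE))
		printf("new test: skip\n");	/* correctly skips non-executable vmas */
	return 0;
}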
@@ -9,26 +9,24 @@
#ifdef __KERNEL__
struct task_struct; /* one of the stranger aspects of C forward declarations.. */
- extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
#define switch_to(prev,next,last) do { \
+ unsigned long esi,edi; \
asm volatile("pushfl\n\t" \
"pushl %%esi\n\t" \
"pushl %%edi\n\t" \
"pushl %%ebp\n\t" \
"movl %%esp,%0\n\t" /* save ESP */ \
"movl %2,%%esp\n\t" /* restore ESP */ \
"movl %5,%%esp\n\t" /* restore ESP */ \
"movl $1f,%1\n\t" /* save EIP */ \
"pushl %3\n\t" /* restore EIP */ \
"pushl %6\n\t" /* restore EIP */ \
"jmp __switch_to\n" \
"1:\t" \
"popl %%ebp\n\t" \
"popl %%edi\n\t" \
"popl %%esi\n\t" \
"popfl\n\t" \
:"=m" (prev->thread.esp),"=m" (prev->thread.eip) \
"popfl" \
:"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
"=a" (last),"=S" (esi),"=D" (edi) \
:"m" (next->thread.esp),"m" (next->thread.eip), \
"a" (prev), "d" (next)); \
"2" (prev), "d" (next)); \
} while (0)
#define _set_base(addr,base) do { unsigned long __pr; \
......
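Two details of the rewritten switch_to are worth spelling out: `last` is now an output in %eax (`"=a" (last)`), matching the new `struct task_struct *` return of __switch_to, and the input constraint `"2" (prev)` is a numbered matching constraint tying `prev` to the same register as output operand 2. A toy example of a matching constraint (GCC on x86, illustrative only):

/* The "0" input constraint means "same location as output operand 0",
 * just as "2" (prev) above means "same register as output operand 2,
 * i.e. %eax". */
#include <stdio.h>

static inline long pass_through(long value)
{
	long result;

	/* Empty template: the value goes in and comes back out of the
	 * one register that operands 0 and "0" share. */
	asm volatile("" : "=a" (result) : "0" (value));
	return result;
}

int main(void)
{
	printf("%ld\n", pass_through(42));	/* prints 42 */
	return 0;
}

The dummy `"=S" (esi)` / `"=D" (edi)` outputs exist only to tell GCC those registers are clobbered, since a register used in a constraint may not also appear in the clobber list.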
@@ -62,6 +62,7 @@
{ \
.state = 0, \
.thread_info = &init_thread_info, \
+ .usage = ATOMIC_INIT(2),
.flags = 0, \
.lock_depth = -1, \
.prio = MAX_PRIO-20, \
......
@@ -674,13 +674,19 @@ static void exit_notify(struct task_struct *tsk)
tsk->state = TASK_ZOMBIE;
/*
- * No need to unlock IRQs, we'll schedule() immediately
- * anyway. In the preemption case this also makes it
- * impossible for the task to get runnable again (thus
- * the "_raw_" unlock - to make sure we don't try to
- * preempt here).
+ * In the preemption case it must be impossible for the task
+ * to get runnable again, so use "_raw_" unlock to keep
+ * preempt_count elevated until we schedule().
+ *
+ * To avoid deadlock on SMP, interrupts must be unmasked. If we
+ * don't, subsequently called functions (e.g., wait_task_inactive()
+ * via release_task()) will spin, with interrupt flags
+ * unwittingly blocked, until the other task sleeps. That task
+ * may itself be waiting for smp_call_function() to answer and
+ * complete, and with interrupts blocked that will never happen.
*/
_raw_write_unlock(&tasklist_lock);
+ local_irq_enable();
}
NORET_TYPE void do_exit(long code)
@@ -727,7 +733,6 @@ NORET_TYPE void do_exit(long code)
tsk->exit_code = code;
exit_notify(tsk);
- preempt_disable();
if (tsk->exit_signal == -1)
release_task(tsk);
......
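The exit_notify() change splits what a plain unlock does in two: `_raw_write_unlock()` releases only the lock, leaving preempt_count elevated so the zombie cannot be preempted and made runnable again before its final schedule(), while the new local_irq_enable() unmasks interrupts so that code called before schedule() (release_task() → wait_task_inactive()) can still receive IPIs. A userspace model of the raw-versus-plain unlock distinction (entirely illustrative, not the kernel API):

#include <stdio.h>

static int lock_held;
static int preempt_count;

static void raw_unlock(void)
{
	lock_held = 0;			/* drop the lock, touch nothing else */
}

static void plain_unlock(void)
{
	raw_unlock();
	--preempt_count;		/* a full unlock also re-enables preemption */
}

int main(void)
{
	/* State after write_lock_irq(): lock held, preemption off. */
	lock_held = 1;
	preempt_count = 1;

	raw_unlock();			/* what exit_notify() does */
	printf("lock_held=%d preempt_count=%d\n", lock_held, preempt_count);
	/* -> lock_held=0 preempt_count=1: still non-preemptible until schedule() */
	return 0;
}

This also explains the do_exit() hunk: with preempt_count already held high by the raw unlock, the explicit preempt_disable() there became redundant.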
@@ -74,6 +74,9 @@ int nr_processes(void)
void __put_task_struct(struct task_struct *tsk)
{
+ WARN_ON(!(tsk->state & (TASK_DEAD | TASK_ZOMBIE)));
+ WARN_ON(atomic_read(&tsk->usage));
if (tsk != current) {
free_thread_info(tsk->thread_info);
kmem_cache_free(task_struct_cachep,tsk);
@@ -217,7 +220,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
*tsk = *orig;
tsk->thread_info = ti;
ti->task = tsk;
- atomic_set(&tsk->usage,1);
+ /* One for us, one for whoever does the "release_task()" (usually parent) */
+ atomic_set(&tsk->usage,2);
return tsk;
}
......
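The fork.c and init_task hunks give every task a usage count of 2: one reference belongs to the task itself (dropped by finish_task_switch() after its final schedule()), the other to whoever reaps it via release_task(). A userspace sketch of the same two-owner scheme using C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct task {
	atomic_int usage;
};

static struct task *task_new(void)
{
	struct task *t = malloc(sizeof(*t));

	/* One for us, one for whoever does the "release_task()". */
	atomic_init(&t->usage, 2);
	return t;
}

static void put_task(struct task *t)
{
	/* Free only when the last reference goes away. */
	if (atomic_fetch_sub(&t->usage, 1) == 1) {
		printf("freeing task\n");
		free(t);
	}
}

int main(void)
{
	struct task *t = task_new();

	put_task(t);	/* the scheduler's put after the final switch */
	put_task(t);	/* the reaper's put in release_task() */
	return 0;
}

The two WARN_ONs added to __put_task_struct() check exactly these invariants: the task must be in an exit state, and its count must have reached zero.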
@@ -152,6 +152,7 @@ struct runqueue {
unsigned long nr_running, nr_switches, expired_timestamp,
nr_uninterruptible;
task_t *curr, *idle;
+ struct mm_struct *prev_mm;
prio_array_t *active, *expired, arrays[2];
int prev_nr_running[NR_CPUS];
#ifdef CONFIG_NUMA
@@ -388,7 +389,10 @@ static inline void resched_task(task_t *p)
* wait_task_inactive - wait for a thread to unschedule.
*
* The caller must ensure that the task *will* unschedule sometime soon,
- * else this function might spin for a *long* time.
+ * else this function might spin for a *long* time. This function can't
+ * be called with interrupts off, or it may introduce deadlock with
+ * smp_call_function() if an IPI is sent by the same process we are
+ * waiting to become inactive.
*/
void wait_task_inactive(task_t * p)
{
@@ -555,13 +559,40 @@ void sched_exit(task_t * p)
p->sleep_avg) / (EXIT_WEIGHT + 1);
}
+ /**
+ * finish_task_switch - clean up after a task-switch
+ * @prev: the thread we just switched away from.
+ *
+ * We enter this with the runqueue still locked, and finish_arch_switch()
+ * will unlock it along with doing any other architecture-specific cleanup
+ * actions.
+ *
+ * Note that we may have delayed dropping an mm in context_switch(). If
+ * so, we finish that here outside of the runqueue lock. (Doing it
+ * with the lock held can cause deadlocks; see schedule() for
+ * details.)
+ */
+ static inline void finish_task_switch(task_t *prev)
+ {
+ runqueue_t *rq = this_rq();
+ struct mm_struct *mm = rq->prev_mm;
+ rq->prev_mm = NULL;
+ finish_arch_switch(rq, prev);
+ if (mm)
+ mmdrop(mm);
+ if (prev->state & (TASK_DEAD | TASK_ZOMBIE))
+ put_task_struct(prev);
+ }
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
*/
asmlinkage void schedule_tail(task_t *prev)
{
- finish_arch_switch(this_rq(), prev);
+ finish_task_switch(prev);
if (current->set_child_tid)
put_user(current->pid, current->set_child_tid);
}
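As the new kernel-doc above explains, context_switch() can no longer mmdrop() a lazy-TLB mm on the spot because it runs with the runqueue lock held; it parks the mm in rq->prev_mm, and finish_task_switch() drops it after the lock is released. A userspace sketch of this park-then-release pattern (pthreads, illustrative only):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static void *prev_mm;			/* parked resource, protected by rq_lock */

/* Runs with rq_lock held: freeing here could deadlock if the free
 * path ever needed rq_lock, so just park the pointer. */
static void context_switch_step(void *oldmm)
{
	prev_mm = oldmm;
}

/* Mirror finish_task_switch(): pick up the parked pointer while the
 * lock is still held, drop the lock, then free outside it. */
static void finish_task_switch_step(void)
{
	void *mm = prev_mm;

	prev_mm = NULL;
	pthread_mutex_unlock(&rq_lock);	/* finish_arch_switch() analogue */
	if (mm)
		free(mm);		/* deferred drop, outside the lock */
}

int main(void)
{
	void *oldmm = malloc(64);

	pthread_mutex_lock(&rq_lock);
	context_switch_step(oldmm);	/* too early to free */
	finish_task_switch_step();	/* unlocks, then drops the parked mm */
	return 0;
}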
@@ -570,7 +601,7 @@ asmlinkage void schedule_tail(task_t *prev)
* context_switch - switch to the new MM and the new
* thread's register state.
*/
- static inline task_t * context_switch(task_t *prev, task_t *next)
+ static inline task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
{
struct mm_struct *mm = next->mm;
struct mm_struct *oldmm = prev->active_mm;
@@ -584,7 +615,8 @@ static inline task_t * context_switch(task_t *prev, task_t *next)
if (unlikely(!prev->mm)) {
prev->active_mm = NULL;
- mmdrop(oldmm);
+ WARN_ON(rq->prev_mm);
+ rq->prev_mm = oldmm;
}
/* Here we just switch the register state and the stack. */
@@ -1155,7 +1187,7 @@ asmlinkage void schedule(void)
* schedule() atomically, we ignore that path for now.
* Otherwise, whine if we are scheduling when we should not be.
*/
- if (likely(current->state != TASK_ZOMBIE)) {
+ if (likely(!(current->state & (TASK_DEAD | TASK_ZOMBIE)))) {
if (unlikely(in_atomic())) {
printk(KERN_ERR "bad: scheduling while atomic!\n");
dump_stack();
@@ -1227,10 +1259,10 @@ asmlinkage void schedule(void)
rq->curr = next;
prepare_arch_switch(rq, next);
- prev = context_switch(prev, next);
+ prev = context_switch(rq, prev, next);
barrier();
rq = this_rq();
- finish_arch_switch(rq, prev);
+ finish_task_switch(prev);
} else
spin_unlock_irq(&rq->lock);
......
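schedule()'s "scheduling while atomic" check is widened from an equality test against TASK_ZOMBIE to a mask against both exit states, since exiting tasks now reach their final schedule() with preempt_count deliberately elevated and may be in TASK_DEAD as well as TASK_ZOMBIE. A toy comparison of the two tests (flag values invented for the example):

#include <stdio.h>

#define TASK_ZOMBIE	0x10	/* illustrative values */
#define TASK_DEAD	0x20

int main(void)
{
	long state = TASK_DEAD;

	/* Old test: an exiting TASK_DEAD task still looked "bad". */
	if (state != TASK_ZOMBIE)
		printf("old test: would warn\n");

	/* New test: any exit state is exempt from the warning. */
	if (!(state & (TASK_DEAD | TASK_ZOMBIE)))
		printf("new test: would warn\n");
	else
		printf("new test: exiting task, no warning\n");
	return 0;
}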
@@ -14,6 +14,8 @@ build-targets := $(host-progs) empty.o
modpost-objs := modpost.o file2alias.o
clean-files := elfconfig.h
+ # Let clean descend into subdirs
+ subdir- := lxdialog kconfig
......