author    | Ralf Baechle <ralf@linux-mips.org> | 2000-08-29 05:27:07 +0000
committer | Ralf Baechle <ralf@linux-mips.org> | 2000-08-29 05:27:07 +0000
commit    | a60c6812feb6ba35b5b8a9ee8a5ca3d01d1fcd5f (patch)
tree      | 2290ff15f280314a063f3dfc523742c8934c4259 /kernel/sched.c
parent    | 1a1d77dd589de5a567fa95e36aa6999c704ceca4 (diff)
Merge with Linux 2.4.0-test8-pre1.
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 40
1 files changed, 29 insertions, 11 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index fda8b5eea..8b47b558a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -455,17 +455,35 @@ signed long schedule_timeout(signed long timeout)
 static inline void __schedule_tail(struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
-	if ((prev->state == TASK_RUNNING) &&
-			(prev != idle_task(smp_processor_id()))) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&runqueue_lock, flags);
-		prev->has_cpu = 0;
-		reschedule_idle(prev, flags); // spin_unlocks runqueue
-	} else {
-		wmb();
-		prev->has_cpu = 0;
-	}
+	unsigned long flags;
+
+	/*
+	 * fast path falls through. We have to take the runqueue lock
+	 * unconditionally to make sure that the test of prev->state
+	 * and setting has_cpu is atomic wrt. interrupts. It's not
+	 * a big problem in the common case because we recently took
+	 * the runqueue lock so it's likely to be in this processor's
+	 * cache.
+	 */
+	spin_lock_irqsave(&runqueue_lock, flags);
+	prev->has_cpu = 0;
+	if (prev->state == TASK_RUNNING)
+		goto running_again;
+out_unlock:
+	spin_unlock_irqrestore(&runqueue_lock, flags);
+	return;
+
+	/*
+	 * Slow path - we 'push' the previous process and
+	 * reschedule_idle() will attempt to find a new
+	 * processor for it. (but it might preempt the
+	 * current process as well.)
+	 */
+running_again:
+	if (prev == idle_task(smp_processor_id()))
+		goto out_unlock;
+	reschedule_idle(prev, flags); // spin_unlocks runqueue
+	return;
 #endif /* CONFIG_SMP */
 }
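
For readers who want to see the new control flow in isolation, below is a minimal, standalone C sketch of the pattern this hunk introduces: take the lock unconditionally so that clearing has_cpu and testing prev->state happen under the same lock, let the common (fast) path fall straight through to the unlock, and branch to a labelled slow path only when the task is still runnable. This is an illustration only, not the kernel code: a pthread mutex stands in for spin_lock_irqsave() on runqueue_lock (it does not disable interrupts), and struct task, its is_idle field, and the stub reschedule_idle() are hypothetical stand-ins.

```c
#include <pthread.h>
#include <stdio.h>

#define TASK_RUNNING 0

/* Hypothetical stand-in for the task_struct fields used in this sketch. */
struct task {
	int state;
	int has_cpu;
	int is_idle;	/* stand-in for "prev == idle_task(smp_processor_id())" */
};

/* Stand-in for runqueue_lock; the real spin_lock_irqsave also masks IRQs. */
static pthread_mutex_t runqueue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stub: in the kernel this picks a CPU and drops runqueue_lock itself. */
static void reschedule_idle(struct task *p)
{
	(void)p;	/* unused in this sketch */
	printf("pushing still-runnable task toward an idle CPU\n");
	pthread_mutex_unlock(&runqueue_lock);
}

static void schedule_tail_sketch(struct task *prev)
{
	pthread_mutex_lock(&runqueue_lock);	/* taken unconditionally */
	prev->has_cpu = 0;			/* cleared under the lock */
	if (prev->state == TASK_RUNNING)
		goto running_again;
out_unlock:					/* fast path falls through here */
	pthread_mutex_unlock(&runqueue_lock);
	return;

running_again:					/* slow path */
	if (prev->is_idle)
		goto out_unlock;
	reschedule_idle(prev);			/* unlocks runqueue_lock */
}

int main(void)
{
	struct task t = { .state = TASK_RUNNING, .has_cpu = 1, .is_idle = 0 };
	schedule_tail_sketch(&t);
	return 0;
}
```

The design trade-off mirrors the patch's own comment: the fast path pays for one extra lock acquisition, which is cheap in the common case because the runqueue lock was taken just before and is likely still in this processor's cache, and in exchange the test of prev->state and the clearing of has_cpu become atomic with respect to interrupts. The slow path hands a still-runnable, non-idle task to reschedule_idle(), which is responsible for dropping the runqueue lock itself.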