author     Ralf Baechle <ralf@linux-mips.org>  2000-02-23 00:40:54 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  2000-02-23 00:40:54 +0000
commit     529c593ece216e4aaffd36bd940cb94f1fa63129 (patch)
tree       78f1c0b805f5656aa7b0417a043c5346f700a2cf /kernel/sched.c
parent     0bd079751d25808d1972baee5c4eaa1db2227257 (diff)
Merge with 2.3.43. I ignored all modifications to the qlogicisp.c
driver because of the Origin A64 hacks.
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  69
1 file changed, 32 insertions(+), 37 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ce72ecc7b..03c05e7c3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -199,30 +199,17 @@ static inline void reschedule_idle(struct task_struct * p, unsigned long flags)
goto send_now;
/*
- * The only heuristics - we use the tsk->avg_slice value
- * to detect 'frequent reschedulers'.
- *
- * If both the woken-up process and the preferred CPU is
- * is a frequent rescheduler, then skip the asynchronous
- * wakeup, the frequent rescheduler will likely chose this
- * task during it's next schedule():
- */
- if (p->policy == SCHED_OTHER) {
- tsk = cpu_curr(best_cpu);
- if (p->avg_slice + tsk->avg_slice < cacheflush_time)
- goto out_no_target;
- }
-
- /*
* We know that the preferred CPU has a cache-affine current
* process, lets try to find a new idle CPU for the woken-up
* process:
*/
- for (i = 0; i < smp_num_cpus; i++) {
+ for (i = smp_num_cpus - 1; i >= 0; i--) {
cpu = cpu_logical_map(i);
+ if (cpu == best_cpu)
+ continue;
tsk = cpu_curr(cpu);
/*
- * We use the first available idle CPU. This creates
+ * We use the last available idle CPU. This creates
* a priority list between idle CPUs, but this is not
* a problem.
*/
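
The hunk above reverses the idle-CPU search: it now walks the logical CPU map
from the highest index down and skips the woken task's preferred CPU, so the
last idle CPU in logical order is the one chosen. A minimal standalone model
of that scan, with cpu_logical_map() folded away and a made-up cpu_state type
instead of the kernel's task structures:

#include <stdio.h>

/* Made-up per-CPU snapshot, for illustration only. */
struct cpu_state {
    int is_idle;    /* non-zero if cpu_curr(cpu) is the idle task */
};

/*
 * Walk the CPUs from the highest index down to 0, skipping the woken
 * task's preferred CPU, and return the first idle CPU the scan finds
 * (i.e. the last one in logical order, as the new comment says).
 * Returns -1 when no other CPU is idle.
 */
static int find_idle_cpu(const struct cpu_state *cpus, int num_cpus, int best_cpu)
{
    int i;

    for (i = num_cpus - 1; i >= 0; i--) {
        if (i == best_cpu)
            continue;
        if (cpus[i].is_idle)
            return i;
    }
    return -1;
}

int main(void)
{
    struct cpu_state cpus[4] = { {0}, {1}, {1}, {0} };

    /* CPU 2 is preferred; CPUs 1 and 2 are idle, so CPU 1 is picked. */
    printf("target cpu: %d\n", find_idle_cpu(cpus, 4, 2));
    return 0;
}
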
@@ -232,26 +219,32 @@ static inline void reschedule_idle(struct task_struct * p, unsigned long flags)
/*
* No CPU is idle, but maybe this process has enough priority
- * to preempt it's preferred CPU. (this is a shortcut):
+ * to preempt its preferred CPU.
*/
tsk = cpu_curr(best_cpu);
if (preemption_goodness(tsk, p, best_cpu) > 0)
goto send_now;
/*
- * We should get here rarely - or in the high CPU contention
+ * We will get here often - or in the high CPU contention
* case. No CPU is idle and this process is either lowprio or
- * the preferred CPU is highprio. Maybe some other CPU can/must
- * be preempted:
+ * the preferred CPU is highprio. Try to preempt some other CPU
+ * only if it's RT or if it's interactive and the preferred
+ * cpu won't reschedule shortly.
*/
- for (i = 0; i < smp_num_cpus; i++) {
- cpu = cpu_logical_map(i);
- tsk = cpu_curr(cpu);
- if (preemption_goodness(tsk, p, cpu) > 0)
- goto send_now;
+ if ((p->avg_slice < cacheflush_time && cpu_curr(best_cpu)->avg_slice > cacheflush_time) ||
+ p->policy != SCHED_OTHER)
+ {
+ for (i = smp_num_cpus - 1; i >= 0; i--) {
+ cpu = cpu_logical_map(i);
+ if (cpu == best_cpu)
+ continue;
+ tsk = cpu_curr(cpu);
+ if (preemption_goodness(tsk, p, cpu) > 0)
+ goto send_now;
+ }
}
-out_no_target:
spin_unlock_irqrestore(&runqueue_lock, flags);
return;
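
The rewritten fallback no longer blindly scans every CPU for a preemption
victim: it does so only when the woken task is real-time, or when it looks
interactive (its average timeslice is shorter than a cache flush) while the
current task on its preferred CPU is cache-heavy and unlikely to reschedule
soon. A rough standalone version of that gate, with simplified field names
rather than the kernel's struct task_struct:

#include <stdio.h>

#define SCHED_OTHER 0   /* matches the kernel's SCHED_OTHER policy value */

/* Simplified task descriptor, for illustration only. */
struct task {
    int policy;                 /* SCHED_OTHER or a real-time policy */
    unsigned long avg_slice;    /* average timeslice actually used   */
};

/*
 * Is it worth looking for a CPU other than the preferred one to preempt?
 * Always for real-time tasks; for SCHED_OTHER tasks only when the woken
 * task reschedules often (avg_slice below the cache-flush cost) while
 * the preferred CPU's current task does not.
 */
static int worth_cross_cpu_preempt(const struct task *woken,
                                   const struct task *on_best_cpu,
                                   unsigned long cacheflush_time)
{
    if (woken->policy != SCHED_OTHER)
        return 1;
    return woken->avg_slice < cacheflush_time &&
           on_best_cpu->avg_slice > cacheflush_time;
}

int main(void)
{
    struct task woken = { SCHED_OTHER, 100 };
    struct task curr  = { SCHED_OTHER, 10000 };

    printf("%d\n", worth_cross_cpu_preempt(&woken, &curr, 1000)); /* 1 */
    return 0;
}
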
@@ -397,6 +390,9 @@ signed long schedule_timeout(signed long timeout)
add_timer(&timer);
schedule();
del_timer(&timer);
+ /* RED-PEN. The timer may still be running on another CPU.
+ * Pray that the process does not exit too quickly.
+ */
timeout = expire - jiffies;
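
The added RED-PEN note points out that del_timer() only dequeues a pending
timer: if its handler is already running on another CPU when
schedule_timeout() returns, nothing here waits for it, so the caller had
better not disappear immediately. The function itself is the usual
arm/sleep/disarm sequence; a toy userspace model of it, with stubbed timer
helpers rather than the real kernel interfaces:

#include <stdio.h>

/* Stub stand-ins for the kernel timer API, for illustration only. */
struct timer_list {
    unsigned long expires;
    void (*function)(unsigned long);
    unsigned long data;
};

static unsigned long jiffies;                               /* fake tick counter */

static void add_timer(struct timer_list *t) { (void)t; }    /* queue it   */
static void del_timer(struct timer_list *t) { (void)t; }    /* dequeue it */
static void schedule(void) { jiffies += 5; }    /* pretend we slept 5 ticks */

static void wake_callback(unsigned long data) { (void)data; }

/*
 * Shape of schedule_timeout(): arm a one-shot timer, sleep, disarm it,
 * and return how much of the requested timeout is left.  del_timer()
 * only removes a pending timer; a handler already running on another
 * CPU keeps running, which is exactly what the RED-PEN comment warns
 * about.
 */
static long schedule_timeout_model(long timeout)
{
    struct timer_list timer;
    unsigned long expire = jiffies + timeout;

    timer.expires = expire;
    timer.function = wake_callback;
    timer.data = 0;

    add_timer(&timer);
    schedule();                 /* sleep until woken or the timer fires */
    del_timer(&timer);          /* may race with a running handler */

    timeout = expire - jiffies;
    return timeout < 0 ? 0 : timeout;
}

int main(void)
{
    printf("ticks left: %ld\n", schedule_timeout_model(10));    /* 5 */
    return 0;
}
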
@@ -460,9 +456,9 @@ tq_scheduler_back:
release_kernel_lock(prev, this_cpu);
/* Do "administrative" work here while we don't hold any locks */
- if (bh_mask & bh_active)
- goto handle_bh;
-handle_bh_back:
+ if (softirq_state[this_cpu].active & softirq_state[this_cpu].mask)
+ goto handle_softirq;
+handle_softirq_back:
/*
* 'sched_data' is protected by the fact that we can run
@@ -581,6 +577,7 @@ still_running_back:
if (next->active_mm) BUG();
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
+ enter_lazy_tlb(oldmm, next, this_cpu);
} else {
if (next->active_mm != mm) BUG();
switch_mm(oldmm, mm, next, this_cpu);
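
The new enter_lazy_tlb() call covers the kernel-thread case: a task with no
mm of its own borrows the previous task's active_mm, and the hook tells the
architecture code that this CPU holds the mm only lazily, so its stale TLB
entries can be discarded rather than eagerly flushed. A heavily simplified
model of that switch-in path, with made-up types and plain increments in
place of atomics:

#include <assert.h>
#include <stddef.h>

/* Made-up, heavily simplified stand-ins; not the kernel's structures. */
struct mm_model {
    int mm_count;               /* reference count on the address space */
};

struct task_model {
    struct mm_model *mm;        /* NULL for kernel threads */
    struct mm_model *active_mm; /* address space actually in use */
};

static int cpu_lazy_tlb[4];

/* Model of the new hook: remember that this CPU only borrows the mm. */
static void enter_lazy_tlb_model(struct mm_model *mm, struct task_model *next, int cpu)
{
    (void)mm;
    (void)next;
    cpu_lazy_tlb[cpu] = 1;
}

/*
 * Switch-in path for a kernel thread (next->mm == NULL), mirroring the
 * hunk above: borrow the previous task's active_mm, take a reference
 * (atomic_inc() in the real code), and mark this CPU lazy.
 */
static void switch_in_kernel_thread(struct mm_model *oldmm, struct task_model *next, int cpu)
{
    assert(next->active_mm == NULL);    /* BUG() in the real code */
    next->active_mm = oldmm;
    oldmm->mm_count++;
    enter_lazy_tlb_model(oldmm, next, cpu);
}

int main(void)
{
    struct mm_model init_mm = { 1 };
    struct task_model kthread = { NULL, NULL };

    switch_in_kernel_thread(&init_mm, &kthread, 0);
    return 0;
}
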
@@ -620,9 +617,9 @@ still_running:
next = prev;
goto still_running_back;
-handle_bh:
- do_bottom_half();
- goto handle_bh_back;
+handle_softirq:
+ do_softirq();
+ goto handle_softirq_back;
handle_tq_scheduler:
run_task_queue(&tq_scheduler);
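
Together with the earlier handle_softirq_back hunk, this replaces the old
global bottom-half check (bh_mask & bh_active) with the per-CPU softirq state
from 2.3.43: schedule() peeks at this CPU's raised-and-enabled softirq bits
and branches out to do_softirq() if any are set. A tiny standalone model of
that check, with the softirq_state layout simplified for illustration:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Simplified per-CPU softirq bookkeeping, for illustration only. */
struct softirq_state_model {
    uint32_t active;    /* bit set => that softirq has been raised      */
    uint32_t mask;      /* bit set => that softirq is currently enabled */
};

static struct softirq_state_model softirq_state[NR_CPUS];

static void do_softirq_model(int cpu)
{
    /* Run and clear every raised-and-enabled softirq on this CPU. */
    uint32_t pending = softirq_state[cpu].active & softirq_state[cpu].mask;

    softirq_state[cpu].active &= ~pending;
    printf("cpu %d ran softirqs 0x%x\n", cpu, (unsigned)pending);
}

int main(void)
{
    int this_cpu = 0;

    softirq_state[this_cpu].active = 0x3;   /* two softirqs raised */
    softirq_state[this_cpu].mask   = 0x1;   /* only one enabled    */

    /* The check the patch adds to schedule(): */
    if (softirq_state[this_cpu].active & softirq_state[this_cpu].mask)
        do_softirq_model(this_cpu);

    return 0;
}
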
@@ -1148,7 +1145,6 @@ void daemonize(void)
void __init init_idle(void)
{
- cycles_t t;
struct schedule_data * sched_data;
sched_data = &aligned_data[smp_processor_id()].schedule_data;
@@ -1157,9 +1153,8 @@ void __init init_idle(void)
smp_processor_id(), current->pid);
del_from_runqueue(current);
}
- t = get_cycles();
sched_data->curr = current;
- sched_data->last_schedule = t;
+ sched_data->last_schedule = get_cycles();
}
void __init sched_init(void)
@@ -1184,5 +1179,5 @@ void __init sched_init(void)
* The boot idle thread does lazy MMU switching as well:
*/
atomic_inc(&init_mm.mm_count);
+ enter_lazy_tlb(&init_mm, current, cpu);
}
-