path: root/kernel/sched.c
author	Ralf Baechle <ralf@linux-mips.org>	1998-03-18 17:17:51 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	1998-03-18 17:17:51 +0000
commit	f1382dc4850bb459d24a81c6cb0ef93ea7bd4a79 (patch)
tree	225271a3d5dcd4e9dea5ee393556abd754c964b1	/kernel/sched.c
parent	135b00fc2e90e605ac2a96b20b0ebd93851a3f89 (diff)
o Merge with Linux 2.1.90.
o Divide L1 cache sizes by 1024 before printing, makes the numbers a bit more credible ...
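(The cache-size tweak itself lands outside kernel/sched.c; purely as an illustration of the arithmetic the message describes, with the variable name assumed rather than taken from this commit:)

	/* Illustration only: report an L1 cache size in kB rather than raw bytes. */
	#include <stdio.h>

	static void report_l1_size(unsigned int icache_bytes)	/* assumed name */
	{
		printf("Primary instruction cache %ukB.\n", icache_bytes / 1024);
	}

	int main(void)
	{
		report_l1_size(32 * 1024);	/* prints "Primary instruction cache 32kB." */
		return 0;
	}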
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	124
1 file changed, 88 insertions, 36 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f48f520ff..a86cb0413 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -155,9 +155,9 @@ static inline void move_last_runqueue(struct task_struct * p)
* The run-queue lock locks the parts that actually access
* and change the run-queues, and have to be interrupt-safe.
*/
-rwlock_t tasklist_lock = RW_LOCK_UNLOCKED;
-spinlock_t scheduler_lock = SPIN_LOCK_UNLOCKED;
-spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t scheduler_lock = SPIN_LOCK_UNLOCKED; /* should be acquired first */
+spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED; /* second */
+rwlock_t tasklist_lock = RW_LOCK_UNLOCKED; /* third */
/*
* Wake up a process. Put it on the run-queue if it's not
@@ -201,14 +201,20 @@ static void process_timeout(unsigned long __data)
*/
static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
{
+ int policy = p->policy;
int weight;
+ if (policy & SCHED_YIELD) {
+ p->policy = policy & ~SCHED_YIELD;
+ return 0;
+ }
+
/*
* Realtime process, select the first one on the
* runqueue (taking priorities within processes
* into account).
*/
- if (p->policy != SCHED_OTHER)
+ if (policy != SCHED_OTHER)
return 1000 + p->rt_priority;
/*
@@ -228,9 +234,10 @@ static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
weight += PROC_CHANGE_PENALTY;
#endif
- /* .. and a slight advantage to the current process */
- if (p == prev)
+ /* .. and a slight advantage to the current thread */
+ if (p->mm == prev->mm)
weight += 1;
+ weight += p->priority;
}
return weight;
@@ -1253,10 +1260,11 @@ asmlinkage int sys_nice(int increment)
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
+ struct task_struct *tsk = current;
+
if (pid)
- return find_task_by_pid(pid);
- else
- return current;
+ tsk = find_task_by_pid(pid);
+ return tsk;
}
static int setscheduler(pid_t pid, int policy,
@@ -1264,48 +1272,70 @@ static int setscheduler(pid_t pid, int policy,
{
struct sched_param lp;
struct task_struct *p;
+ int retval;
+ retval = -EINVAL;
if (!param || pid < 0)
- return -EINVAL;
+ goto out_nounlock;
+ retval = -EFAULT;
if (copy_from_user(&lp, param, sizeof(struct sched_param)))
- return -EFAULT;
+ goto out_nounlock;
+
+ /*
+ * We play safe to avoid deadlocks.
+ */
+ spin_lock_irq(&scheduler_lock);
+ spin_lock(&runqueue_lock);
+ read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
+
+ retval = -ESRCH;
if (!p)
- return -ESRCH;
+ goto out_unlock;
if (policy < 0)
policy = p->policy;
- else if (policy != SCHED_FIFO && policy != SCHED_RR &&
- policy != SCHED_OTHER)
- return -EINVAL;
+ else {
+ retval = -EINVAL;
+ if (policy != SCHED_FIFO && policy != SCHED_RR &&
+ policy != SCHED_OTHER)
+ goto out_unlock;
+ }
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
* priority for SCHED_OTHER is 0.
*/
+ retval = -EINVAL;
if (lp.sched_priority < 0 || lp.sched_priority > 99)
- return -EINVAL;
+ goto out_unlock;
if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
- return -EINVAL;
+ goto out_unlock;
+ retval = -EPERM;
if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
- return -EPERM;
+ goto out_unlock;
if ((current->euid != p->euid) && (current->euid != p->uid) &&
!suser())
- return -EPERM;
+ goto out_unlock;
+ retval = 0;
p->policy = policy;
p->rt_priority = lp.sched_priority;
- spin_lock(&scheduler_lock);
- spin_lock_irq(&runqueue_lock);
if (p->next_run)
move_last_runqueue(p);
- spin_unlock_irq(&runqueue_lock);
- spin_unlock(&scheduler_lock);
+
need_resched = 1;
- return 0;
+
+out_unlock:
+ read_unlock(&tasklist_lock);
+ spin_unlock(&runqueue_lock);
+ spin_unlock_irq(&scheduler_lock);
+
+out_nounlock:
+ return retval;
}
asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
@@ -1322,42 +1352,64 @@ asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
asmlinkage int sys_sched_getscheduler(pid_t pid)
{
struct task_struct *p;
+ int retval;
+ retval = -EINVAL;
if (pid < 0)
- return -EINVAL;
+ goto out_nounlock;
+ read_lock(&tasklist_lock);
+
+ retval = -ESRCH;
p = find_process_by_pid(pid);
if (!p)
- return -ESRCH;
+ goto out_unlock;
- return p->policy;
+ retval = p->policy;
+
+out_unlock:
+ read_unlock(&tasklist_lock);
+
+out_nounlock:
+ return retval;
}
asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
{
struct task_struct *p;
struct sched_param lp;
+ int retval;
+ retval = -EINVAL;
if (!param || pid < 0)
- return -EINVAL;
+ goto out_nounlock;
+ read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
+ retval = -ESRCH;
if (!p)
- return -ESRCH;
-
+ goto out_unlock;
lp.sched_priority = p->rt_priority;
- return copy_to_user(param, &lp, sizeof(struct sched_param)) ? -EFAULT : 0;
+ read_unlock(&tasklist_lock);
+
+ /*
+ * This one might sleep, we cannot do it with a spinlock held ...
+ */
+ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
+
+out_nounlock:
+ return retval;
+
+out_unlock:
+ read_unlock(&tasklist_lock);
+ return retval;
}
asmlinkage int sys_sched_yield(void)
{
- /*
- * This is not really right. We'd like to reschedule
- * just _once_ with this process having a zero count.
- */
- current->counter = 0;
spin_lock(&scheduler_lock);
spin_lock_irq(&runqueue_lock);
+ current->policy |= SCHED_YIELD;
move_last_runqueue(current);
spin_unlock_irq(&runqueue_lock);
spin_unlock(&scheduler_lock);
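For reference, the lock nesting the new comments spell out (scheduler_lock first, runqueue_lock second, tasklist_lock third) is exactly the order the reworked setscheduler() now takes them in. A minimal userspace sketch of that acquire-in-one-fixed-order rule, using pthread mutexes rather than the kernel's spinlock/rwlock primitives purely for illustration:

	/*
	 * Sketch only: three mutexes standing in for scheduler_lock,
	 * runqueue_lock and tasklist_lock.  Every path that needs more than
	 * one of them takes them in the same fixed order, so no two paths
	 * can each hold one lock while waiting on another.
	 */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t scheduler_lock = PTHREAD_MUTEX_INITIALIZER; /* first  */
	static pthread_mutex_t runqueue_lock  = PTHREAD_MUTEX_INITIALIZER; /* second */
	static pthread_mutex_t tasklist_lock  = PTHREAD_MUTEX_INITIALIZER; /* third  */

	static void change_policy(void)
	{
		pthread_mutex_lock(&scheduler_lock);	/* 1st */
		pthread_mutex_lock(&runqueue_lock);	/* 2nd */
		pthread_mutex_lock(&tasklist_lock);	/* 3rd */

		printf("policy changed under all three locks\n");

		pthread_mutex_unlock(&tasklist_lock);	/* release in reverse order */
		pthread_mutex_unlock(&runqueue_lock);
		pthread_mutex_unlock(&scheduler_lock);
	}

	int main(void)
	{
		change_policy();
		return 0;
	}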