author     Ralf Baechle <ralf@linux-mips.org>    2000-07-11 02:32:09 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    2000-07-11 02:32:09 +0000
commit     7f5ea64ad438953cbeb3055f424dfac01d5bcfc7
tree       f8cabc30da0d0eaa578cb6369c816e02af148510 /kernel
parent     99e873a7003ab3980a6296c29066e3ab7956a009
Merge with Linux 2.4.0-test3.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c    4
-rw-r--r--  kernel/sched.c  81
-rw-r--r--  kernel/sys.c    46
-rw-r--r--  kernel/timer.c  13
4 files changed, 68 insertions, 76 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 536a6dd9c..5675c7f53 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -55,8 +55,8 @@ void release(struct task_struct * p)
* was given away by the parent in the first place.)
*/
current->counter += p->counter;
- if (current->counter >= current->priority*2)
- current->counter = current->priority*2-1;
+ if (current->counter >= MAX_COUNTER)
+ current->counter = MAX_COUNTER;
free_task_struct(p);
} else {
printk("task releasing itself\n");
diff --git a/kernel/sched.c b/kernel/sched.c
index dd055d92f..38e792167 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -43,6 +43,31 @@ unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
extern void mem_use(void);
/*
+ * Scheduling quanta.
+ *
+ * NOTE! The unix "nice" value influences how long a process
+ * gets. The nice value ranges from -20 to +19, where a -20
+ * is a "high-priority" task, and a "+10" is a low-priority
+ * task.
+ *
+ * We want the time-slice to be around 50ms or so, so this
+ * calculation depends on the value of HZ.
+ */
+#if HZ < 200
+#define LOG2_HZ 7
+#elif HZ < 400
+#define LOG2_HZ 8
+#elif HZ < 800
+#define LOG2_HZ 9
+#elif HZ < 1600
+#define LOG2_HZ 10
+#else
+#define LOG2_HZ 11
+#endif
+
+#define NICE_TO_TICKS(nice) ((((20)-(nice)) >> (LOG2_HZ-5))+1)
+
+/*
* Init task must be ok at boot for the ix86 as we will check its signals
* via the SMP irq return path.
*/
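To make the new quantum calculation concrete, the following standalone sketch evaluates NICE_TO_TICKS for a few nice levels, assuming HZ = 100 (so LOG2_HZ = 7 and the shift amount is 2, reducing the macro to ((20 - nice) >> 2) + 1):

#include <stdio.h>

#define HZ 100          /* assumed; substitute your platform's value */

#if HZ < 200
#define LOG2_HZ 7
#elif HZ < 400
#define LOG2_HZ 8
#elif HZ < 800
#define LOG2_HZ 9
#elif HZ < 1600
#define LOG2_HZ 10
#else
#define LOG2_HZ 11
#endif

#define NICE_TO_TICKS(nice) ((((20)-(nice)) >> (LOG2_HZ-5))+1)

int main(void)
{
        int nice;

        /* At HZ == 100 a tick is 10 ms, so nice 0 gets 6 ticks (60 ms),
         * nice -20 gets 11 ticks (110 ms) and nice +19 gets 1 tick (10 ms). */
        for (nice = -20; nice <= 19; nice += 13)
                printf("nice %3d -> %2d ticks (%d ms)\n",
                       nice, NICE_TO_TICKS(nice),
                       NICE_TO_TICKS(nice) * 1000 / HZ);
        return 0;
}

Deriving the slice from HZ by a shift keeps the quantum roughly constant in wall-clock time (near the stated 50 ms target) across platforms with different tick rates.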
@@ -121,7 +146,7 @@ static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
* into account).
*/
if (p->policy != SCHED_OTHER) {
- weight = 1000 + 2*DEF_PRIORITY + p->rt_priority;
+ weight = 1000 + p->rt_priority;
goto out;
}
@@ -146,7 +171,7 @@ static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
/* .. and a slight advantage to the current MM */
if (p->mm == this_mm || !p->mm)
weight += 1;
- weight += p->priority;
+ weight += 20 - p->nice;
out:
return weight;
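So for SCHED_OTHER tasks the static bonus becomes 20 - p->nice (0..40) stacked on the remaining counter, while real-time tasks drop the old 2*DEF_PRIORITY fudge and compete on 1000 + rt_priority alone. A toy model of the reweighting, using hypothetical counter/nice values (the mm bonus and SMP affinity terms are omitted):

#include <stdio.h>

/* Toy goodness() for two hypothetical SCHED_OTHER tasks: the one with
 * more remaining ticks and a lower nice value wins the CPU. */
static int toy_goodness(int counter, int nice)
{
        int weight = counter;           /* remaining timeslice */
        if (weight)
                weight += 20 - nice;    /* static bonus replacing p->priority */
        return weight;
}

int main(void)
{
        printf("nice  0, 6 ticks left: %d\n", toy_goodness(6, 0));   /* 26 */
        printf("nice 10, 4 ticks left: %d\n", toy_goodness(4, 10));  /* 14 */
        return 0;
}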
@@ -622,7 +647,7 @@ recalculate:
spin_unlock_irq(&runqueue_lock);
read_lock(&tasklist_lock);
for_each_task(p)
- p->counter = (p->counter >> 1) + p->priority;
+ p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
read_unlock(&tasklist_lock);
spin_lock_irq(&runqueue_lock);
}
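The recharge formula keeps half of any unused quantum, so a task that sleeps through successive recalculations converges toward twice its NICE_TO_TICKS quantum. A small sketch of that convergence, hard-coding 6 ticks for NICE_TO_TICKS(0) at an assumed HZ of 100:

#include <stdio.h>

#define NICE_TO_TICKS_0 6       /* NICE_TO_TICKS(0) at HZ == 100, see above */

int main(void)
{
        int counter = 0, i;

        /* A task that keeps sleeping through recalculations accumulates
         * counter = counter/2 + quantum, which converges toward twice
         * the quantum -- an interactivity bonus for sleepers. */
        for (i = 1; i <= 6; i++) {
                counter = (counter >> 1) + NICE_TO_TICKS_0;
                printf("recalculate #%d: counter = %d ticks\n", i, counter);
        }
        return 0;
}

The fixed point is 2*q - 1 (11 ticks here), which is why tasks that sleep a lot wake up with extra goodness over CPU hogs at the same nice level.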
@@ -648,7 +673,7 @@ handle_tq_scheduler:
move_rr_last:
if (!prev->counter) {
- prev->counter = prev->priority;
+ prev->counter = NICE_TO_TICKS(prev->nice);
move_last_runqueue(prev);
}
goto move_rr_back;
@@ -821,50 +846,28 @@ void scheduling_functions_end_here(void) { }
asmlinkage long sys_nice(int increment)
{
- unsigned long newprio;
- int increase = 0;
+ long newprio;
/*
* Setpriority might change our priority at the same moment.
* We don't have to worry. Conceptually one call occurs first
* and we have a single winner.
*/
-
- newprio = increment;
if (increment < 0) {
if (!capable(CAP_SYS_NICE))
return -EPERM;
- newprio = -increment;
- increase = 1;
+ if (increment < -40)
+ increment = -40;
}
-
- if (newprio > 40)
- newprio = 40;
- /*
- * do a "normalization" of the priority (traditionally
- * Unix nice values are -20 to 20; Linux doesn't really
- * use that kind of thing, but uses the length of the
- * timeslice instead (default 200 ms). The rounding is
- * why we want to avoid negative values.
- */
- newprio = (newprio * DEF_PRIORITY + 10)/20;
- increment = newprio;
- if (increase)
- increment = -increment;
- /*
- * Current->priority can change between this point
- * and the assignment. We are assigning not doing add/subs
- * so thats ok. Conceptually a process might just instantaneously
- * read the value we stomp over. I don't think that is an issue
- * unless posix makes it one. If so we can loop on changes
- * to current->priority.
- */
- newprio = current->priority - increment;
- if ((signed) newprio < 1)
- newprio = DEF_PRIORITY/20;
- if (newprio > DEF_PRIORITY*2)
- newprio = DEF_PRIORITY*2;
- current->priority = newprio;
+ if (increment > 40)
+ increment = 40;
+
+ newprio = current->nice + increment;
+ if (newprio < -20)
+ newprio = -20;
+ if (newprio > 19)
+ newprio = 19;
+ current->nice = newprio;
return 0;
}
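The rewritten sys_nice() is now just bounded arithmetic on the nice value itself: the increment is clamped to [-40, 40] and the result to [-20, 19]. A userspace sketch of the same clamping, with a hypothetical current_nice standing in for current->nice (the CAP_SYS_NICE check on negative increments is left out):

#include <stdio.h>

/* Hypothetical stand-in for current->nice. */
static long current_nice = 0;

static long do_nice(int increment)
{
        long newprio;

        /* Clamp the increment first... */
        if (increment < -40)
                increment = -40;
        if (increment > 40)
                increment = 40;

        /* ...then clamp the resulting nice value to the valid range. */
        newprio = current_nice + increment;
        if (newprio < -20)
                newprio = -20;
        if (newprio > 19)
                newprio = 19;
        current_nice = newprio;
        return 0;
}

int main(void)
{
        do_nice(10);            /* 0 -> 10 */
        do_nice(100);           /* increment clamped, 10 -> 19 */
        printf("nice is now %ld\n", current_nice);
        return 0;
}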
@@ -1066,7 +1069,7 @@ asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
if (p)
- jiffies_to_timespec(p->policy & SCHED_FIFO ? 0 : p->priority,
+ jiffies_to_timespec(p->policy & SCHED_FIFO ? 0 : NICE_TO_TICKS(p->nice),
&t);
read_unlock(&tasklist_lock);
if (p)
diff --git a/kernel/sys.c b/kernel/sys.c
index 3079dc295..a3879169e 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -199,26 +199,17 @@ static int proc_sel(struct task_struct *p, int which, int who)
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
struct task_struct *p;
- unsigned int priority;
int error;
if (which > 2 || which < 0)
return -EINVAL;
/* normalize: avoid signed division (rounding problems) */
- error = ESRCH;
- priority = niceval;
- if (niceval < 0)
- priority = -niceval;
- if (priority > 20)
- priority = 20;
- priority = (priority * DEF_PRIORITY + 10) / 20 + DEF_PRIORITY;
-
- if (niceval >= 0) {
- priority = 2*DEF_PRIORITY - priority;
- if (!priority)
- priority = 1;
- }
+ error = -ESRCH;
+ if (niceval < -20)
+ niceval = -20;
+ if (niceval > 19)
+ niceval = 19;
read_lock(&tasklist_lock);
for_each_task(p) {
@@ -226,47 +217,46 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
continue;
if (p->uid != current->euid &&
p->uid != current->uid && !capable(CAP_SYS_NICE)) {
- error = EPERM;
+ error = -EPERM;
continue;
}
- if (error == ESRCH)
+ if (error == -ESRCH)
error = 0;
- if (priority > p->priority && !capable(CAP_SYS_NICE))
- error = EACCES;
+ if (niceval < p->nice && !capable(CAP_SYS_NICE))
+ error = -EACCES;
else
- p->priority = priority;
+ p->nice = niceval;
}
read_unlock(&tasklist_lock);
- return -error;
+ return error;
}
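A side change worth noting: error codes are now stored negative (-ESRCH, -EPERM, -EACCES) and returned as-is, replacing the old positive-code-negated-on-return pattern. The two conventions in miniature, with a hypothetical found flag:

#include <errno.h>
#include <stdio.h>

/* Old style: positive code, negated on return. */
static long set_old(int found)
{
        long error = ESRCH;
        if (found)
                error = 0;
        return -error;
}

/* New style: negative errno throughout, returned unchanged. */
static long set_new(int found)
{
        long error = -ESRCH;
        if (found)
                error = 0;
        return error;
}

int main(void)
{
        printf("old: %ld, new: %ld\n", set_old(0), set_new(0));
        return 0;
}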
/*
* Ugh. To avoid negative return values, "getpriority()" will
* not return the normal nice-value, but a value that has been
- * offset by 20 (ie it returns 0..40 instead of -20..20)
+ * offset by 20 (ie it returns 0..39 instead of -20..19)
*/
asmlinkage long sys_getpriority(int which, int who)
{
struct task_struct *p;
- long max_prio = -ESRCH;
+ long retval = -ESRCH;
if (which > 2 || which < 0)
return -EINVAL;
read_lock(&tasklist_lock);
for_each_task (p) {
+ unsigned niceval;
if (!proc_sel(p, which, who))
continue;
- if (p->priority > max_prio)
- max_prio = p->priority;
+ niceval = p->nice + 20;
+ if (niceval < (unsigned)retval)
+ retval = niceval;
}
read_unlock(&tasklist_lock);
- /* scale the priority from timeslice to 0..40 */
- if (max_prio > 0)
- max_prio = (max_prio * 20 + DEF_PRIORITY/2) / DEF_PRIORITY;
- return max_prio;
+ return retval;
}
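The comment change above matters: getpriority() now reports p->nice + 20 directly (0..39) with no timeslice scaling. The initial retval of -ESRCH, seen through the (unsigned) cast, is a huge value, so the first matching task always replaces it; after that the smallest offset value, i.e. the highest priority, wins. A sketch of that selection over a hypothetical array of matched nice values:

#include <errno.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical nice values of the tasks matched by proc_sel(). */
        int nice[] = { 5, -3, 10 };
        long retval = -ESRCH;   /* returned unchanged if nothing matches */
        int i;

        for (i = 0; i < 3; i++) {
                unsigned niceval = nice[i] + 20;
                /* (unsigned)-ESRCH is enormous, so the first match always
                 * wins; thereafter the smallest offset (highest priority)
                 * is kept. */
                if (niceval < (unsigned)retval)
                        retval = niceval;
        }
        printf("getpriority() result: %ld (nice %ld)\n", retval, retval - 20);
        return 0;
}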
diff --git a/kernel/timer.c b/kernel/timer.c
index 1a0c2ba73..591c5e29d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -577,7 +577,7 @@ static void update_process_times(int user_tick)
p->counter = 0;
p->need_resched = 1;
}
- if (p->priority < DEF_PRIORITY)
+ if (p->nice < 0)
kstat.cpu_nice += user_tick;
else
kstat.cpu_user += user_tick;
@@ -629,7 +629,8 @@ static inline void calc_load(unsigned long ticks)
}
}
-volatile unsigned long lost_ticks;
+/* jiffies at the most recent update of wall time */
+unsigned long wall_jiffies;
/*
* This spinlock protect us from races in SMP while playing with xtime. -arca
@@ -647,14 +648,13 @@ static inline void update_times(void)
*/
write_lock_irq(&xtime_lock);
- ticks = lost_ticks;
- lost_ticks = 0;
-
+ ticks = jiffies - wall_jiffies;
if (ticks) {
- calc_load(ticks);
+ wall_jiffies += ticks;
update_wall_time(ticks);
}
write_unlock_irq(&xtime_lock);
+ calc_load(ticks);
}
void timer_bh(void)
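With lost_ticks gone, the number of ticks to account is derived on demand as jiffies - wall_jiffies, and calc_load() moves outside the xtime_lock critical section. A simplified model of the bookkeeping, with stand-in counters for the kernel's jiffies and wall_jiffies:

#include <stdio.h>

/* Stand-ins for the kernel's tick counters. */
static unsigned long jiffies, wall_jiffies;

static void update_times(void)
{
        /* Ticks elapsed since the wall clock was last advanced. */
        unsigned long ticks = jiffies - wall_jiffies;

        if (ticks) {
                wall_jiffies += ticks;
                /* update_wall_time(ticks) would run here, under xtime_lock. */
        }
        /* calc_load(ticks) now runs outside the lock. */
        printf("accounted %lu tick(s), wall_jiffies = %lu\n",
               ticks, wall_jiffies);
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++)
                jiffies++;      /* do_timer() no longer bumps lost_ticks */
        update_times();
        return 0;
}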
@@ -666,7 +666,6 @@ void timer_bh(void)
void do_timer(struct pt_regs *regs)
{
(*(unsigned long *)&jiffies)++;
- lost_ticks++;
update_process_times(user_mode(regs));
mark_bh(TIMER_BH);
if (tq_timer)