summary | refs | log | tree | commit | diff | stats
path: root/kernel
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-03-07 15:45:24 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-03-07 15:45:24 +0000
commit9f9f3e6e8548a596697778337110a423c384b6f3 (patch)
tree5dd4b290ef532cf5ecb058e1a92cd3435afeac8c /kernel
parentd5c9a365ee7d2fded249aa5abfc5e89587583029 (diff)
Merge with Linux 2.3.49.
Diffstat (limited to 'kernel')
-rw-r--r--kernel/Makefile6
-rw-r--r--kernel/ksyms.c9
-rw-r--r--kernel/resource.c46
-rw-r--r--kernel/sched.c6
4 files changed, 47 insertions, 20 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index cce15a524..bec392fca 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -29,12 +29,8 @@ ifeq ($(CONFIG_MODULES),y)
OX_OBJS += ksyms.o
endif
-ifdef CONFIG_ACPI
+ifdef CONFIG_PM
OX_OBJS += pm.o
-else
- ifdef CONFIG_APM
- OX_OBJS += pm.o
- endif
endif
CFLAGS_sched.o := $(PROFILING) -fno-omit-frame-pointer
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 06de40312..82ee62980 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -43,6 +43,7 @@
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/highuid.h>
+#include <linux/brlock.h>
#if defined(CONFIG_PROC_FS)
#include <linux/proc_fs.h>
@@ -340,6 +341,14 @@ EXPORT_SYMBOL(timer_table);
#ifdef __SMP__
/* Various random spinlocks we want to export */
EXPORT_SYMBOL(tqueue_lock);
+
+/* Big-Reader lock implementation */
+EXPORT_SYMBOL(__brlock_array);
+#ifndef __BRLOCK_USE_ATOMICS
+EXPORT_SYMBOL(__br_write_locks);
+#endif
+EXPORT_SYMBOL(__br_write_lock);
+EXPORT_SYMBOL(__br_write_unlock);
#endif
/* autoirq from drivers/net/auto_irq.c */
diff --git a/kernel/resource.c b/kernel/resource.c
index dd81c462b..3d7aa17d0 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -92,17 +92,7 @@ static struct resource * __request_resource(struct resource *root, struct resour
}
}
-int request_resource(struct resource *root, struct resource *new)
-{
- struct resource *conflict;
-
- write_lock(&resource_lock);
- conflict = __request_resource(root, new);
- write_unlock(&resource_lock);
- return conflict ? -EBUSY : 0;
-}
-
-int release_resource(struct resource *old)
+static int __release_resource(struct resource *old)
{
struct resource *tmp, **p;
@@ -121,6 +111,40 @@ int release_resource(struct resource *old)
return -EINVAL;
}
+int request_resource(struct resource *root, struct resource *new)
+{
+ struct resource *conflict;
+
+ write_lock(&resource_lock);
+ conflict = __request_resource(root, new);
+ write_unlock(&resource_lock);
+ return conflict ? -EBUSY : 0;
+}
+
+int release_resource(struct resource *old)
+{
+ int retval;
+
+ write_lock(&resource_lock);
+ retval = __release_resource(old);
+ write_unlock(&resource_lock);
+ return retval;
+}
+
+int check_resource(struct resource *root, unsigned long start, unsigned long len)
+{
+ struct resource *conflict, tmp;
+
+ tmp.start = start;
+ tmp.end = start + len - 1;
+ write_lock(&resource_lock);
+ conflict = __request_resource(root, &tmp);
+ if (!conflict)
+ __release_resource(&tmp);
+ write_unlock(&resource_lock);
+ return conflict ? -EBUSY : 0;
+}
+
/*
* Find empty slot in the resource tree given range and alignment.
*/
diff --git a/kernel/sched.c b/kernel/sched.c
index bec8a4494..e1dbc62ba 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -228,13 +228,11 @@ static inline void reschedule_idle(struct task_struct * p, unsigned long flags)
/*
* We will get here often - or in the high CPU contention
* case. No CPU is idle and this process is either lowprio or
- * the preferred CPU is highprio. Try to preemt some other CPU
+ * the preferred CPU is highprio. Try to preempt some other CPU
* only if it's RT or if it's iteractive and the preferred
* cpu won't reschedule shortly.
*/
- if ((p->avg_slice < cacheflush_time && cpu_curr(best_cpu)->avg_slice > cacheflush_time) ||
- p->policy != SCHED_OTHER)
- {
+ if (p->avg_slice < cacheflush_time || (p->policy & ~SCHED_YIELD) != SCHED_OTHER) {
for (i = smp_num_cpus - 1; i >= 0; i--) {
cpu = cpu_logical_map(i);
if (cpu == best_cpu)