author     Ralf Baechle <ralf@linux-mips.org>   2000-11-28 03:58:46 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   2000-11-28 03:58:46 +0000
commit     b63ad0882a16a5d28003e57f2b0b81dee3fb322b (patch)
tree       0a343ce219e2b8b38a5d702d66032c57b83d9720 /kernel
parent     a9d7bff9a84dba79609a0002e5321b74c4d64c64 (diff)
Merge with 2.4.0-test11.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile      6
-rw-r--r--  kernel/context.c    60
-rw-r--r--  kernel/exit.c        8
-rw-r--r--  kernel/fork.c        6
-rw-r--r--  kernel/kmod.c       28
-rw-r--r--  kernel/ksyms.c      26
-rw-r--r--  kernel/module.c    343
-rw-r--r--  kernel/ptrace.c      6
-rw-r--r--  kernel/resource.c    2
-rw-r--r--  kernel/sched.c      79
-rw-r--r--  kernel/sysctl.c     13
11 files changed, 416 insertions, 161 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index a13812119..8f4c218f3 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -12,16 +12,12 @@ O_OBJS = sched.o dma.o fork.o exec_domain.o panic.o printk.o \
module.o exit.o itimer.o info.o time.o softirq.o resource.o \
sysctl.o acct.o capability.o ptrace.o timer.o user.o
-OX_OBJS += signal.o sys.o
+OX_OBJS += signal.o sys.o kmod.o context.o
ifeq ($(CONFIG_UID16),y)
O_OBJS += uid16.o
endif
-ifeq ($(CONFIG_KMOD),y)
-O_OBJS += kmod.o
-endif
-
ifeq ($(CONFIG_MODULES),y)
OX_OBJS += ksyms.o
endif
diff --git a/kernel/context.c b/kernel/context.c
new file mode 100644
index 000000000..b5219786a
--- /dev/null
+++ b/kernel/context.c
@@ -0,0 +1,60 @@
+/*
+ * linux/kernel/context.c
+ *
+ * Mechanism for running arbitrary tasks in process context
+ *
+ * dwmw2@redhat.com
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+static DECLARE_TASK_QUEUE(tq_context);
+static DECLARE_WAIT_QUEUE_HEAD(context_task_wq);
+
+void schedule_task(struct tq_struct *task)
+{
+ queue_task(task, &tq_context);
+ wake_up(&context_task_wq);
+}
+
+EXPORT_SYMBOL(schedule_task);
+
+static int context_thread(void *dummy)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ daemonize();
+ strcpy(current->comm, "eventd");
+
+ spin_lock_irq(&current->sigmask_lock);
+ sigfillset(&current->blocked);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ for (;;) {
+ current->state = TASK_INTERRUPTIBLE;
+ add_wait_queue(&context_task_wq, &wait);
+
+ /*
+ * Careful: we depend on the wait-queue modifications
+ * to also act as memory barriers.
+ */
+ if (!tq_context)
+ schedule();
+
+ remove_wait_queue(&context_task_wq, &wait);
+ current->state = TASK_RUNNING;
+ run_task_queue(&tq_context);
+ }
+}
+
+static int __init start_context_thread(void)
+{
+ kernel_thread(context_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
+ return 0;
+}
+
+module_init(start_context_thread);
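
schedule_task() above is the whole deferral API: queue a tq_struct and wake the "eventd" thread, which runs the queued routines in process context where sleeping is allowed. A minimal usage sketch (names are illustrative; tq_struct comes from <linux/tqueue.h>):

#include <linux/tqueue.h>

static void my_deferred_work(void *data)
{
        /* runs in the eventd kernel thread: process context, may sleep */
}

static struct tq_struct my_task = {
        routine: my_deferred_work,      /* called via run_task_queue() */
        data:    NULL,
};

static void my_irq_path(void)
{
        schedule_task(&my_task);        /* queue the work and wake eventd */
}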
diff --git a/kernel/exit.c b/kernel/exit.c
index 3f60ec29d..33b68980b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -467,6 +467,14 @@ fake_volatile:
goto fake_volatile;
}
+NORET_TYPE void up_and_exit(struct semaphore *sem, long code)
+{
+ if (sem)
+ up(sem);
+
+ do_exit(code);
+}
+
asmlinkage long sys_exit(int error_code)
{
do_exit((error_code&0xff)<<8);
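
up_and_exit() combines up() and do_exit() so a kernel thread can signal a semaphore and terminate without returning through code that may already be gone once the waiter proceeds, e.g. a module being unloaded. A hedged sketch of the intended pattern (names are illustrative):

static DECLARE_MUTEX_LOCKED(exit_sem);

static int my_kthread(void *unused)
{
        /* ... thread work ... */
        up_and_exit(&exit_sem, 0);      /* wakes the waiter, never returns */
        return 0;                       /* not reached */
}

static void run_and_wait(void)
{
        kernel_thread(my_kthread, NULL, 0);
        down(&exit_sem);        /* returns only once the thread is in do_exit() */
}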
diff --git a/kernel/fork.c b/kernel/fork.c
index b93b0b0e4..d85c3494a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -39,6 +39,7 @@ void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
unsigned long flags;
wq_write_lock_irqsave(&q->lock, flags);
+ wait->flags = 0;
__add_wait_queue(q, wait);
wq_write_unlock_irqrestore(&q->lock, flags);
}
@@ -48,6 +49,7 @@ void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
unsigned long flags;
wq_write_lock_irqsave(&q->lock, flags);
+ wait->flags = WQ_FLAG_EXCLUSIVE;
__add_wait_queue_tail(q, wait);
wq_write_unlock_irqrestore(&q->lock, flags);
}
@@ -572,9 +574,9 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
*/
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
+
+ get_exec_domain(p->exec_domain);
- if (p->exec_domain && p->exec_domain->module)
- __MOD_INC_USE_COUNT(p->exec_domain->module);
if (p->binfmt && p->binfmt->module)
__MOD_INC_USE_COUNT(p->binfmt->module);
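
The two wait-queue hunks above stamp each entry with its wake-up semantics: plain add_wait_queue() clears the flags, add_wait_queue_exclusive() sets WQ_FLAG_EXCLUSIVE. Together with the sched.c changes below, exclusivity now lives in the queue entry rather than in TASK_EXCLUSIVE task state. A sketch of an exclusive sleeper ('condition' is illustrative):

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static volatile int condition;

static void sleep_exclusively(void)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&my_wq, &wait);  /* WQ_FLAG_EXCLUSIVE */
        current->state = TASK_INTERRUPTIBLE;
        if (!condition)
                schedule();
        current->state = TASK_RUNNING;
        remove_wait_queue(&my_wq, &wait);
}

A wake_up() on my_wq wakes non-exclusive waiters as before but stops after one exclusive waiter.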
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 3fff3ed3d..a21507eec 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -16,17 +16,13 @@
#define __KERNEL_SYSCALLS__
#include <linux/config.h>
+#include <linux/module.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
-/*
- modprobe_path is set via /proc/sys.
-*/
-char modprobe_path[256] = "/sbin/modprobe";
-
extern int max_threads;
static inline void
@@ -118,7 +114,8 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[])
}
/* Give kmod all effective privileges.. */
- current->uid = current->euid = current->fsuid = 0;
+ current->euid = current->fsuid = 0;
+ current->egid = current->fsgid = 0;
cap_set_full(current->cap_effective);
/* Allow execve args to be in kernel space. */
@@ -130,10 +127,17 @@ int exec_usermodehelper(char *program_path, char *argv[], char *envp[])
return 0;
}
+#ifdef CONFIG_KMOD
+
+/*
+ modprobe_path is set via /proc/sys.
+*/
+char modprobe_path[256] = "/sbin/modprobe";
+
static int exec_modprobe(void * module_name)
{
static char * envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
- char *argv[] = { modprobe_path, "-s", "-k", (char*)module_name, NULL };
+ char *argv[] = { modprobe_path, "-s", "-k", "--", (char*)module_name, NULL };
int ret;
ret = exec_usermodehelper(modprobe_path, argv, envp);
@@ -226,6 +230,7 @@ int request_module(const char * module_name)
}
return 0;
}
+#endif /* CONFIG_KMOD */
#ifdef CONFIG_HOTPLUG
@@ -247,6 +252,10 @@ int request_module(const char * module_name)
*/
char hotplug_path[256] = "/sbin/hotplug";
+EXPORT_SYMBOL(hotplug_path);
+
+#endif /* CONFIG_HOTPLUG */
+
static int exec_helper (void *arg)
{
@@ -286,5 +295,10 @@ int call_usermodehelper (char *path, char **argv, char **envp)
return retval;
}
+EXPORT_SYMBOL(exec_usermodehelper);
+EXPORT_SYMBOL(call_usermodehelper);
+
+#ifdef CONFIG_KMOD
+EXPORT_SYMBOL(request_module);
#endif
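
With this rearrangement, exec_usermodehelper() and call_usermodehelper() are exported whenever the file is built, while request_module() stays CONFIG_KMOD-only. A hedged sketch of running a user-space helper (path and arguments are hypothetical):

static int run_my_helper(void)
{
        char *argv[] = { "/sbin/myhelper", "start", NULL };
        static char *envp[] = { "HOME=/",
                                "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

        /* forks a kernel thread that execs the helper program */
        return call_usermodehelper(argv[0], argv, envp);
}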
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index b8eb6b4f0..100adaeb3 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -71,20 +71,12 @@ __attribute__((section("__ksymtab"))) = {
#endif
-#ifdef CONFIG_KMOD
-EXPORT_SYMBOL(request_module);
-EXPORT_SYMBOL(exec_usermodehelper);
-#ifdef CONFIG_HOTPLUG
-EXPORT_SYMBOL(hotplug_path);
-EXPORT_SYMBOL(call_usermodehelper);
-#endif
-#endif
-
-#ifdef CONFIG_MODULES
-EXPORT_SYMBOL(get_module_symbol);
-EXPORT_SYMBOL(put_module_symbol);
+EXPORT_SYMBOL(inter_module_register);
+EXPORT_SYMBOL(inter_module_unregister);
+EXPORT_SYMBOL(inter_module_get);
+EXPORT_SYMBOL(inter_module_get_request);
+EXPORT_SYMBOL(inter_module_put);
EXPORT_SYMBOL(try_inc_mod_count);
-#endif
/* process memory management */
EXPORT_SYMBOL(do_mmap_pgoff);
@@ -124,6 +116,7 @@ EXPORT_SYMBOL(vmtruncate);
EXPORT_SYMBOL(find_vma);
EXPORT_SYMBOL(get_unmapped_area);
EXPORT_SYMBOL(init_mm);
+EXPORT_SYMBOL(deactivate_page);
#ifdef CONFIG_HIGHMEM
EXPORT_SYMBOL(kmap_high);
EXPORT_SYMBOL(kunmap_high);
@@ -215,7 +208,6 @@ EXPORT_SYMBOL(generic_buffer_fdatasync);
EXPORT_SYMBOL(page_hash_bits);
EXPORT_SYMBOL(page_hash_table);
EXPORT_SYMBOL(file_lock_list);
-EXPORT_SYMBOL(file_lock_sem);
EXPORT_SYMBOL(locks_init_lock);
EXPORT_SYMBOL(locks_copy_lock);
EXPORT_SYMBOL(posix_lock_file);
@@ -342,6 +334,7 @@ EXPORT_SYMBOL(register_sysctl_table);
EXPORT_SYMBOL(unregister_sysctl_table);
EXPORT_SYMBOL(sysctl_string);
EXPORT_SYMBOL(sysctl_intvec);
+EXPORT_SYMBOL(sysctl_jiffies);
EXPORT_SYMBOL(proc_dostring);
EXPORT_SYMBOL(proc_dointvec);
EXPORT_SYMBOL(proc_dointvec_jiffies);
@@ -425,6 +418,7 @@ EXPORT_SYMBOL(ioport_resource);
EXPORT_SYMBOL(iomem_resource);
/* process management */
+EXPORT_SYMBOL(up_and_exit);
EXPORT_SYMBOL(__wake_up);
EXPORT_SYMBOL(wake_up_process);
EXPORT_SYMBOL(sleep_on);
@@ -494,10 +488,6 @@ EXPORT_SYMBOL(remove_inode_hash);
EXPORT_SYMBOL(make_bad_inode);
EXPORT_SYMBOL(is_bad_inode);
EXPORT_SYMBOL(event);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
EXPORT_SYMBOL(brw_page);
#ifdef CONFIG_UID16
diff --git a/kernel/module.c b/kernel/module.c
index 15efa305c..e89149de9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1,11 +1,14 @@
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <asm/module.h>
#include <asm/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>
#include <asm/pgalloc.h>
#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
/*
* Originally by Anonymous (as far as I know...)
@@ -14,11 +17,17 @@
* Heavily modified by Bjorn Ekwall <bj0rn@blox.se> May 1994 (C)
* Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
* Add MOD_INITIALIZING Keith Owens <kaos@ocs.com.au> Nov 1999
+ * Add kallsyms support, Keith Owens <kaos@ocs.com.au> Apr 2000
+ * Add asm/module support, IA64 has special requirements. Keith Owens <kaos@ocs.com.au> Sep 2000
+ * Fix assorted bugs in module verification. Keith Owens <kaos@ocs.com.au> Sep 2000
+ * Fix sys_init_module race, Andrew Morton <andrewm@uow.edu.au> Oct 2000
+ * http://www.uwsg.iu.edu/hypermail/linux/kernel/0008.3/0379.html
+ * Replace xxx_module_symbol with inter_module_xxx. Keith Owens <kaos@ocs.com.au> Oct 2000
*
* This source is covered by the GNU GPL, the same as all kernel sources.
*/
-#ifdef CONFIG_MODULES /* a *big* #ifdef block... */
+#if defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS)
extern struct module_symbol __start___ksymtab[];
extern struct module_symbol __stop___ksymtab[];
@@ -26,6 +35,9 @@ extern struct module_symbol __stop___ksymtab[];
extern const struct exception_table_entry __start___ex_table[];
extern const struct exception_table_entry __stop___ex_table[];
+extern const char __start___kallsyms[] __attribute__ ((weak));
+extern const char __stop___kallsyms[] __attribute__ ((weak));
+
static struct module kernel_module =
{
size_of_struct: sizeof(struct module),
@@ -33,16 +45,182 @@ static struct module kernel_module =
uc: {ATOMIC_INIT(1)},
flags: MOD_RUNNING,
syms: __start___ksymtab,
- ex_table_start: __start___ex_table,
- ex_table_end: __stop___ex_table
+ ex_table_start: __start___ex_table,
+ ex_table_end: __stop___ex_table,
+ kallsyms_start: __start___kallsyms,
+ kallsyms_end: __stop___kallsyms,
};
struct module *module_list = &kernel_module;
+#endif /* defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS) */
+
+/* inter_module functions are always available, even when the kernel is
+ * compiled without modules. Consumers of inter_module_xxx routines
+ * will always work, even when both provider and consumer are built into
+ * the kernel; this approach removes lots of #ifdefs in mainline code.
+ */
+
+static struct list_head ime_list = LIST_HEAD_INIT(ime_list);
+static spinlock_t ime_lock = SPIN_LOCK_UNLOCKED;
+static int kmalloc_failed;
+
+/**
+ * inter_module_register - register a new set of inter module data.
+ * @im_name: an arbitrary string to identify the data, must be unique
+ * @owner: module that is registering the data, always use THIS_MODULE
+ * @userdata: pointer to arbitrary userdata to be registered
+ *
+ * Description: Check that the im_name has not already been registered,
+ * complain if it has. For new data, add it to the inter_module_entry
+ * list.
+ */
+void inter_module_register(const char *im_name, struct module *owner, const void *userdata)
+{
+ struct list_head *tmp;
+ struct inter_module_entry *ime, *ime_new;
+
+ if (!(ime_new = kmalloc(sizeof(*ime), GFP_KERNEL))) {
+ /* Overloaded kernel, not fatal */
+ printk(KERN_ERR
+ "Aiee, inter_module_register: cannot kmalloc entry for '%s'\n",
+ im_name);
+ kmalloc_failed = 1;
+ return;
+ }
+ memset(ime_new, 0, sizeof(*ime_new));
+ ime_new->im_name = im_name;
+ ime_new->owner = owner;
+ ime_new->userdata = userdata;
+
+ spin_lock(&ime_lock);
+ list_for_each(tmp, &ime_list) {
+ ime = list_entry(tmp, struct inter_module_entry, list);
+ if (strcmp(ime->im_name, im_name) == 0) {
+ spin_unlock(&ime_lock);
+ kfree(ime_new);
+ /* Program logic error, fatal */
+ panic("inter_module_register: duplicate im_name '%s'", im_name);
+ }
+ }
+ list_add(&(ime_new->list), &ime_list);
+ spin_unlock(&ime_lock);
+}
+
+/**
+ * inter_module_unregister - unregister a set of inter module data.
+ * @im_name: an arbitrary string to identify the data, must be unique
+ *
+ * Description: Check that the im_name has been registered, complain if
+ * it has not. For existing data, remove it from the
+ * inter_module_entry list.
+ */
+void inter_module_unregister(const char *im_name)
+{
+ struct list_head *tmp;
+ struct inter_module_entry *ime;
+
+ spin_lock(&ime_lock);
+ list_for_each(tmp, &ime_list) {
+ ime = list_entry(tmp, struct inter_module_entry, list);
+ if (strcmp(ime->im_name, im_name) == 0) {
+ list_del(&(ime->list));
+ spin_unlock(&ime_lock);
+ kfree(ime);
+ return;
+ }
+ }
+ spin_unlock(&ime_lock);
+ if (kmalloc_failed) {
+ printk(KERN_ERR
+ "inter_module_unregister: no entry for '%s', "
+ "probably caused by previous kmalloc failure\n",
+ im_name);
+ return;
+ }
+ else {
+ /* Program logic error, fatal */
+ panic("inter_module_unregister: no entry for '%s'", im_name);
+ }
+}
+
+/**
+ * inter_module_get - return arbitrary userdata from another module.
+ * @im_name: an arbitrary string to identify the data, must be unique
+ *
+ * Description: If the im_name has not been registered, return NULL.
+ * Try to increment the use count on the owning module, if that fails
+ * then return NULL. Otherwise return the userdata.
+ */
+const void *inter_module_get(const char *im_name)
+{
+ struct list_head *tmp;
+ struct inter_module_entry *ime;
+ const void *result = NULL;
+
+ spin_lock(&ime_lock);
+ list_for_each(tmp, &ime_list) {
+ ime = list_entry(tmp, struct inter_module_entry, list);
+ if (strcmp(ime->im_name, im_name) == 0) {
+ if (try_inc_mod_count(ime->owner))
+ result = ime->userdata;
+ break;
+ }
+ }
+ spin_unlock(&ime_lock);
+ return(result);
+}
+
+/**
+ * inter_module_get_request - im get with automatic request_module.
+ * @im_name: an arbitrary string to identify the data, must be unique
+ * @modname: module that is expected to register im_name
+ *
+ * Description: If inter_module_get fails, do request_module then retry.
+ */
+const void *inter_module_get_request(const char *im_name, const char *modname)
+{
+ const void *result = inter_module_get(im_name);
+ if (!result) {
+ request_module(modname);
+ result = inter_module_get(im_name);
+ }
+ return(result);
+}
+
+/**
+ * inter_module_put - release use of data from another module.
+ * @im_name: an arbitrary string to identify the data, must be unique
+ *
+ * Description: If the im_name has not been registered, complain,
+ * otherwise decrement the use count on the owning module.
+ */
+void inter_module_put(const char *im_name)
+{
+ struct list_head *tmp;
+ struct inter_module_entry *ime;
+
+ spin_lock(&ime_lock);
+ list_for_each(tmp, &ime_list) {
+ ime = list_entry(tmp, struct inter_module_entry, list);
+ if (strcmp(ime->im_name, im_name) == 0) {
+ if (ime->owner)
+ __MOD_DEC_USE_COUNT(ime->owner);
+ spin_unlock(&ime_lock);
+ return;
+ }
+ }
+ spin_unlock(&ime_lock);
+ panic("inter_module_put: no entry for '%s'", im_name);
+}
+
+
+#if defined(CONFIG_MODULES) /* The rest of the source */
+
static long get_mod_name(const char *user_name, char **buf);
static void put_mod_name(char *buf);
-static struct module *find_module(const char *name);
-static void free_module(struct module *, int tag_freed);
+struct module *find_module(const char *name);
+void free_module(struct module *, int tag_freed);
/*
@@ -151,7 +329,7 @@ asmlinkage long
sys_init_module(const char *name_user, struct module *mod_user)
{
struct module mod_tmp, *mod;
- char *name, *n_name;
+ char *name, *n_name, *name_tmp = NULL;
long namelen, n_namelen, i, error;
unsigned long mod_user_size;
struct module_ref *dep;
@@ -168,7 +346,7 @@ sys_init_module(const char *name_user, struct module *mod_user)
goto err1;
}
- /* Check module header size. We allow a bit of slop over the
+ /* Check module header size. We allow a bit of slop over the
size we are familiar with to cope with a version of insmod
for a newer kernel. But don't over do it. */
if ((error = get_user(mod_user_size, &mod_user->size_of_struct)) != 0)
@@ -185,8 +363,14 @@ sys_init_module(const char *name_user, struct module *mod_user)
/* Hold the current contents while we play with the user's idea
of righteousness. */
mod_tmp = *mod;
+ name_tmp = kmalloc(strlen(mod->name) + 1, GFP_KERNEL); /* Where's kstrdup()? */
+ if (name_tmp == NULL) {
+ error = -ENOMEM;
+ goto err1;
+ }
+ strcpy(name_tmp, mod->name);
- error = copy_from_user(mod, mod_user, sizeof(struct module));
+ error = copy_from_user(mod, mod_user, mod_user_size);
if (error) {
error = -EFAULT;
goto err2;
@@ -203,32 +387,29 @@ sys_init_module(const char *name_user, struct module *mod_user)
/* Make sure all interesting pointers are sane. */
-#define bound(p, n, m) ((unsigned long)(p) >= (unsigned long)(m+1) && \
- (unsigned long)((p)+(n)) <= (unsigned long)(m) + (m)->size)
-
- if (!bound(mod->name, namelen, mod)) {
+ if (!mod_bound(mod->name, namelen, mod)) {
printk(KERN_ERR "init_module: mod->name out of bounds.\n");
goto err2;
}
- if (mod->nsyms && !bound(mod->syms, mod->nsyms, mod)) {
+ if (mod->nsyms && !mod_bound(mod->syms, mod->nsyms, mod)) {
printk(KERN_ERR "init_module: mod->syms out of bounds.\n");
goto err2;
}
- if (mod->ndeps && !bound(mod->deps, mod->ndeps, mod)) {
+ if (mod->ndeps && !mod_bound(mod->deps, mod->ndeps, mod)) {
printk(KERN_ERR "init_module: mod->deps out of bounds.\n");
goto err2;
}
- if (mod->init && !bound(mod->init, 0, mod)) {
+ if (mod->init && !mod_bound(mod->init, 0, mod)) {
printk(KERN_ERR "init_module: mod->init out of bounds.\n");
goto err2;
}
- if (mod->cleanup && !bound(mod->cleanup, 0, mod)) {
+ if (mod->cleanup && !mod_bound(mod->cleanup, 0, mod)) {
printk(KERN_ERR "init_module: mod->cleanup out of bounds.\n");
goto err2;
}
if (mod->ex_table_start > mod->ex_table_end
|| (mod->ex_table_start &&
- !((unsigned long)mod->ex_table_start >= (unsigned long)(mod+1)
+ !((unsigned long)mod->ex_table_start >= ((unsigned long)mod + mod->size_of_struct)
&& ((unsigned long)mod->ex_table_end
< (unsigned long)mod + mod->size)))
|| (((unsigned long)mod->ex_table_start
@@ -242,24 +423,51 @@ sys_init_module(const char *name_user, struct module *mod_user)
goto err2;
}
#ifdef __alpha__
- if (!bound(mod->gp - 0x8000, 0, mod)) {
+ if (!mod_bound(mod->gp - 0x8000, 0, mod)) {
printk(KERN_ERR "init_module: mod->gp out of bounds.\n");
goto err2;
}
#endif
if (mod_member_present(mod, can_unload)
- && mod->can_unload && !bound(mod->can_unload, 0, mod)) {
+ && mod->can_unload && !mod_bound(mod->can_unload, 0, mod)) {
printk(KERN_ERR "init_module: mod->can_unload out of bounds.\n");
goto err2;
}
-
-#undef bound
+ if (mod_member_present(mod, kallsyms_end)) {
+ if (mod->kallsyms_end &&
+ (!mod_bound(mod->kallsyms_start, 0, mod) ||
+ !mod_bound(mod->kallsyms_end, 0, mod))) {
+ printk(KERN_ERR "init_module: mod->kallsyms out of bounds.\n");
+ goto err2;
+ }
+ if (mod->kallsyms_start > mod->kallsyms_end) {
+ printk(KERN_ERR "init_module: mod->kallsyms invalid.\n");
+ goto err2;
+ }
+ }
+ if (mod_member_present(mod, archdata_end)) {
+ if (mod->archdata_end &&
+ (!mod_bound(mod->archdata_start, 0, mod) ||
+ !mod_bound(mod->archdata_end, 0, mod))) {
+ printk(KERN_ERR "init_module: mod->archdata out of bounds.\n");
+ goto err2;
+ }
+ if (mod->archdata_start > mod->archdata_end) {
+ printk(KERN_ERR "init_module: mod->archdata invalid.\n");
+ goto err2;
+ }
+ }
+ if (mod_member_present(mod, kernel_data) && mod->kernel_data) {
+ printk(KERN_ERR "init_module: mod->kernel_data must be zero.\n");
+ goto err2;
+ }
/* Check that the user isn't doing something silly with the name. */
if ((n_namelen = get_mod_name(mod->name - (unsigned long)mod
+ (unsigned long)mod_user,
&n_name)) < 0) {
+ printk(KERN_ERR "init_module: get_mod_name failure.\n");
error = n_namelen;
goto err2;
}
@@ -277,13 +485,17 @@ sys_init_module(const char *name_user, struct module *mod_user)
goto err3;
}
+ if (module_arch_init(mod))
+ goto err3;
+
/* On some machines it is necessary to do something here
to make the I and D caches consistent. */
flush_icache_range((unsigned long)mod, (unsigned long)mod + mod->size);
- /* Update module references. */
mod->next = mod_tmp.next;
mod->refs = NULL;
+
+ /* Sanity check the module's dependents */
for (i = 0, dep = mod->deps; i < mod->ndeps; ++i, ++dep) {
struct module *o, *d = dep->dep;
@@ -294,18 +506,25 @@ sys_init_module(const char *name_user, struct module *mod_user)
goto err3;
}
- for (o = module_list; o != &kernel_module; o = o->next)
- if (o == d) goto found_dep;
+ /* Scan the current modules for this dependency */
+ for (o = module_list; o != &kernel_module && o != d; o = o->next)
+ ;
- printk(KERN_ERR "init_module: found dependency that is "
+ if (o != d) {
+ printk(KERN_ERR "init_module: found dependency that is "
"(no longer?) a module.\n");
- goto err3;
-
- found_dep:
+ goto err3;
+ }
+ }
+
+ /* Update module references. */
+ for (i = 0, dep = mod->deps; i < mod->ndeps; ++i, ++dep) {
+ struct module *d = dep->dep;
+
dep->ref = mod;
dep->next_ref = d->refs;
d->refs = dep;
- /* Being referenced by a dependent module counts as a
+ /* Being referenced by a dependent module counts as a
use as far as kmod is concerned. */
d->flags |= MOD_USED_ONCE;
}
@@ -335,10 +554,12 @@ err3:
put_mod_name(n_name);
err2:
*mod = mod_tmp;
+ strcpy((char *)mod->name, name_tmp); /* We know there is room for this */
err1:
put_mod_name(name);
err0:
unlock_kernel();
+ kfree(name_tmp);
return error;
}
@@ -384,11 +605,11 @@ sys_delete_module(const char *name_user)
}
put_mod_name(name);
error = -EBUSY;
- if (mod->refs != NULL)
+ if (mod->refs != NULL)
goto out;
spin_lock(&unload_lock);
- if (!__MOD_IN_USE(mod)) {
+ if (!__MOD_IN_USE(mod)) {
mod->flags |= MOD_DELETED;
spin_unlock(&unload_lock);
free_module(mod, 0);
@@ -768,7 +989,7 @@ out:
* Look for a module by name, ignoring modules marked for deletion.
*/
-static struct module *
+struct module *
find_module(const char *name)
{
struct module *mod;
@@ -787,7 +1008,7 @@ find_module(const char *name)
* Free the given module.
*/
-static void
+void
free_module(struct module *mod, int tag_freed)
{
struct module_ref *dep;
@@ -795,7 +1016,7 @@ free_module(struct module *mod, int tag_freed)
/* Let the module clean up. */
- if (mod->flags & MOD_RUNNING)
+ if (mod->flags & MOD_RUNNING)
{
if(mod->cleanup)
mod->cleanup();
@@ -852,7 +1073,7 @@ int get_module_list(char *p)
} while (0)
#define safe_copy_cstr(str) safe_copy_str(str, sizeof(str)-1)
- len = strlen(mod->name);
+ len = strlen(mod->name);
safe_copy_str(mod->name, len);
if ((len = 20 - len) > 0) {
@@ -961,58 +1182,6 @@ leave_the_loop:
return len;
}
-/*
- * Gets the address for a symbol in the given module. If modname is
- * NULL, it looks for the name in any registered symbol table. If the
- * modname is an empty string, it looks for the symbol in kernel exported
- * symbol tables. Increase the usage count of the module in which the
- * symbol was found - it's the only way we can guarantee that it's still
- * there by the time our caller actually uses it.
- */
-unsigned long
-get_module_symbol(char *modname, char *symname)
-{
- struct module *mp;
- struct module_symbol *sym;
- int i;
-
- spin_lock(&unload_lock);
- for (mp = module_list; mp; mp = mp->next) {
- if (((modname == NULL) || (strcmp(mp->name, modname) == 0)) &&
- MOD_CAN_QUERY(mp) &&
- (mp->nsyms > 0)) {
- for (i = mp->nsyms, sym = mp->syms;
- i > 0; --i, ++sym) {
-
- if (strcmp(sym->name, symname) == 0) {
- __MOD_INC_USE_COUNT(mp);
- spin_unlock(&unload_lock);
- return sym->value;
- }
- }
- }
- }
- spin_unlock(&unload_lock);
- return 0;
-}
-
-/* Decrease the use count of the module containing a symbol with the
- * address passed.
- */
-void put_module_symbol(unsigned long addr)
-{
- struct module *mp;
-
- for (mp = module_list; mp; mp = mp->next) {
- if (MOD_CAN_QUERY(mp) &&
- addr >= (unsigned long)mp &&
- addr < (unsigned long)mp + mp->size) {
- __MOD_DEC_USE_COUNT(mp);
- return;
- }
- }
-}
-
#else /* CONFIG_MODULES */
/* Dummy syscalls for people who don't want modules */
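
The inter_module_* interface above replaces get_module_symbol()/put_module_symbol(): a provider registers arbitrary data under a unique string, and a consumer looks it up, pinning the owner's use count until it calls put. A sketch with illustrative names:

/* provider module */
static int shared_value = 42;           /* stand-in for real shared data */

static int __init provider_init(void)
{
        inter_module_register("my-data", THIS_MODULE, &shared_value);
        return 0;
}

static void __exit provider_exit(void)
{
        inter_module_unregister("my-data");
}

/* consumer: loads the provider on demand; a successful get pins it */
static void use_provider(void)
{
        const int *p = inter_module_get_request("my-data", "provider");
        if (p) {
                /* ... use *p; the provider cannot be unloaded here ... */
                inter_module_put("my-data");
        }
}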
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 87dea8254..957355ef4 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -24,7 +24,7 @@ static int access_one_page(struct mm_struct * mm, struct vm_area_struct * vma, u
pgd_t * pgdir;
pmd_t * pgmiddle;
pte_t * pgtable;
- unsigned long maddr;
+ char *maddr;
struct page *page;
repeat:
@@ -54,13 +54,13 @@ repeat:
if (write) {
maddr = kmap(page);
- memcpy((char *)maddr + (addr & ~PAGE_MASK), buf, len);
+ memcpy(maddr + (addr & ~PAGE_MASK), buf, len);
flush_page_to_ram(page);
flush_icache_page(vma, page, addr);
kunmap(page);
} else {
maddr = kmap(page);
- memcpy(buf, (char *)maddr + (addr & ~PAGE_MASK), len);
+ memcpy(buf, maddr + (addr & ~PAGE_MASK), len);
flush_page_to_ram(page);
kunmap(page);
}
diff --git a/kernel/resource.c b/kernel/resource.c
index 3d7aa17d0..b553eb0ff 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -288,7 +288,7 @@ void __release_region(struct resource *parent, unsigned long start, unsigned lon
}
p = &res->sibling;
}
- printk("Trying to free nonexistent resource <%04lx-%04lx>\n", start, end);
+ printk("Trying to free nonexistent resource <%08lx-%08lx>\n", start, end);
}
/*
diff --git a/kernel/sched.c b/kernel/sched.c
index 927bf47e7..119edeb81 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -77,16 +77,14 @@ struct task_struct * init_tasks[NR_CPUS] = {&init_task, };
/*
* The tasklist_lock protects the linked list of processes.
*
- * The scheduler lock is protecting against multiple entry
- * into the scheduling code, and doesn't need to worry
- * about interrupts (because interrupts cannot call the
- * scheduler).
- *
- * The run-queue lock locks the parts that actually access
+ * The runqueue_lock locks the parts that actually access
* and change the run-queues, and have to be interrupt-safe.
+ *
+ * If both locks are to be concurrently held, the runqueue_lock
+ * nests inside the tasklist_lock.
*/
-spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; /* second */
-rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* third */
+spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; /* inner */
+rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
static LIST_HEAD(runqueue_head);
@@ -199,12 +197,8 @@ static inline int preemption_goodness(struct task_struct * prev, struct task_str
/*
* This is ugly, but reschedule_idle() is very timing-critical.
- * We enter with the runqueue spinlock held, but we might end
- * up unlocking it early, so the caller must not unlock the
- * runqueue, it's always done by reschedule_idle().
- *
- * This function must be inline as anything that saves and restores
- * flags has to do so within the same register window on sparc (Anton)
+ * We are called with the runqueue spinlock held and we must
+ * not claim the tasklist_lock.
*/
static FASTCALL(void reschedule_idle(struct task_struct * p));
@@ -433,15 +427,27 @@ static inline void __schedule_tail(struct task_struct *prev)
int policy;
/*
+ * prev->policy can be written from here only before `prev'
+ * can be scheduled (before setting prev->has_cpu to zero).
+ * Of course it must also be read before allowing prev
+ * to be rescheduled, but since the write depends on the read
+ * to complete, wmb() is enough. (the spin_lock() acquired
+ * before setting has_cpu is not enough because the spin_lock()
+ * common code semantics allows code outside the critical section
+ * to enter inside the critical section)
+ */
+ policy = prev->policy;
+ prev->policy = policy & ~SCHED_YIELD;
+ wmb();
+
+ /*
* fast path falls through. We have to clear has_cpu before
* checking prev->state to avoid a wakeup race - thus we
* also have to protect against the task exiting early.
*/
task_lock(prev);
- policy = prev->policy;
- prev->policy = policy & ~SCHED_YIELD;
prev->has_cpu = 0;
- wmb();
+ mb();
if (prev->state == TASK_RUNNING)
goto needs_resched;
@@ -535,7 +541,7 @@ handle_softirq_back:
goto move_rr_last;
move_rr_back:
- switch (prev->state & ~TASK_EXCLUSIVE) {
+ switch (prev->state) {
case TASK_INTERRUPTIBLE:
if (signal_pending(prev)) {
prev->state = TASK_RUNNING;
@@ -694,14 +700,14 @@ scheduling_in_interrupt:
}
static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
- const int sync)
+ unsigned int wq_mode, const int sync)
{
struct list_head *tmp, *head;
struct task_struct *p, *best_exclusive;
unsigned long flags;
int best_cpu, irq;
- if (!q || !waitqueue_active(q))
+ if (!q)
goto out;
best_cpu = smp_processor_id();
@@ -730,7 +736,7 @@ static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
#endif
p = curr->task;
state = p->state;
- if (state & (mode & ~TASK_EXCLUSIVE)) {
+ if (state & mode) {
#if WAITQUEUE_DEBUG
curr->__waker = (long)__builtin_return_address(0);
#endif
@@ -739,18 +745,19 @@ static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
* prefer processes which are affine to this
* CPU.
*/
- if (irq && (state & mode & TASK_EXCLUSIVE)) {
+ if (irq && (curr->flags & wq_mode & WQ_FLAG_EXCLUSIVE)) {
if (!best_exclusive)
best_exclusive = p;
- else if ((p->processor == best_cpu) &&
- (best_exclusive->processor != best_cpu))
- best_exclusive = p;
+ if (p->processor == best_cpu) {
+ best_exclusive = p;
+ break;
+ }
} else {
if (sync)
wake_up_process_synchronous(p);
else
wake_up_process(p);
- if (state & mode & TASK_EXCLUSIVE)
+ if (curr->flags & wq_mode & WQ_FLAG_EXCLUSIVE)
break;
}
}
@@ -766,14 +773,14 @@ out:
return;
}
-void __wake_up(wait_queue_head_t *q, unsigned int mode)
+void __wake_up(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode)
{
- __wake_up_common(q, mode, 0);
+ __wake_up_common(q, mode, wq_mode, 0);
}
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode)
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode)
{
- __wake_up_common(q, mode, 1);
+ __wake_up_common(q, mode, wq_mode, 1);
}
#define SLEEP_ON_VAR \
@@ -905,8 +912,8 @@ static int setscheduler(pid_t pid, int policy,
/*
* We play safe to avoid deadlocks.
*/
- spin_lock_irq(&runqueue_lock);
- read_lock(&tasklist_lock);
+ read_lock_irq(&tasklist_lock);
+ spin_lock(&runqueue_lock);
p = find_process_by_pid(pid);
@@ -950,8 +957,8 @@ static int setscheduler(pid_t pid, int policy,
current->need_resched = 1;
out_unlock:
- read_unlock(&tasklist_lock);
- spin_unlock_irq(&runqueue_lock);
+ spin_unlock(&runqueue_lock);
+ read_unlock_irq(&tasklist_lock);
out_nounlock:
return retval;
@@ -1227,7 +1234,9 @@ void daemonize(void)
fs = init_task.fs;
current->fs = fs;
atomic_inc(&fs->count);
-
+ exit_files(current);
+ current->files = init_task.files;
+ atomic_inc(&current->files->count);
}
void __init init_idle(void)
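
The setscheduler() hunk above also flips its locking to match the nesting now documented at the top of sched.c, taking the outer tasklist_lock before the inner runqueue_lock:

static void touch_sched_state(void)
{
        read_lock_irq(&tasklist_lock);  /* outer */
        spin_lock(&runqueue_lock);      /* inner */
        /* ... find the task and adjust its scheduling fields ... */
        spin_unlock(&runqueue_lock);
        read_unlock_irq(&tasklist_lock);
}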
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 70da2a029..5591ee2bc 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -54,10 +54,10 @@ static int minolduid;
#ifdef CONFIG_KMOD
extern char modprobe_path[];
+#endif
#ifdef CONFIG_HOTPLUG
extern char hotplug_path[];
#endif
-#endif
#ifdef CONFIG_CHR_DEV_SG
extern int sg_big_buff;
#endif
@@ -188,11 +188,11 @@ static ctl_table kern_table[] = {
#ifdef CONFIG_KMOD
{KERN_MODPROBE, "modprobe", &modprobe_path, 256,
0644, NULL, &proc_dostring, &sysctl_string },
+#endif
#ifdef CONFIG_HOTPLUG
{KERN_HOTPLUG, "hotplug", &hotplug_path, 256,
0644, NULL, &proc_dostring, &sysctl_string },
#endif
-#endif
#ifdef CONFIG_CHR_DEV_SG
{KERN_SG_BIG_BUFF, "sg-big-buff", &sg_big_buff, sizeof (int),
0444, NULL, &proc_dointvec},
@@ -571,7 +571,7 @@ static void unregister_proc_table(ctl_table * table, struct proc_dir_entry *root
}
/* Don't unregister proc entries that are still being used.. */
- if (de->count)
+ if (atomic_read(&de->count))
continue;
table->de = NULL;
@@ -1240,6 +1240,13 @@ int sysctl_intvec(ctl_table *table, int *name, int nlen,
return -ENOSYS;
}
+int sysctl_jiffies(ctl_table *table, int *name, int nlen,
+ void *oldval, size_t *oldlenp,
+ void *newval, size_t newlen, void **context)
+{
+ return -ENOSYS;
+}
+
int proc_dostring(ctl_table *table, int write, struct file *filp,
void *buffer, size_t *lenp)
{
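
sysctl_jiffies() is added here as a stub returning -ENOSYS, beside the matching sysctl_intvec() stub, apparently in the !CONFIG_SYSCTL branch; the export added in ksyms.c suggests a working strategy routine for jiffies-valued entries, paired with proc_dointvec_jiffies. A hypothetical table entry using the pair:

static int my_timeout = 5 * HZ;         /* illustrative value, in jiffies */

static ctl_table my_table[] = {
        {1, "my-timeout", &my_timeout, sizeof(int),
         0644, NULL, &proc_dointvec_jiffies, &sysctl_jiffies},
        {0}
};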