author     Ralf Baechle <ralf@linux-mips.org>   1998-03-18 17:17:51 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   1998-03-18 17:17:51 +0000
commit     f1382dc4850bb459d24a81c6cb0ef93ea7bd4a79 (patch)
tree       225271a3d5dcd4e9dea5ee393556abd754c964b1 /kernel
parent     135b00fc2e90e605ac2a96b20b0ebd93851a3f89 (diff)
o Merge with Linux 2.1.90.
o Divide L1 cache sizes by 1024 before printing; makes the numbers a bit more credible ...
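
The cache-size change mentioned above is on the MIPS side of the merge and so does not appear in the kernel/ diffstat below. As a hedged illustration of the idea (variable names here are assumptions, not taken from this diff), the fix is just to shift the byte count down by ten bits before printing:

	/* illustrative only: report cache sizes in kilobytes, not bytes */
	printk("Primary instruction cache %dkb, primary data cache %dkb\n",
	       icache_size >> 10, dcache_size >> 10);
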
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/Makefile |   4
-rw-r--r--   kernel/exit.c   |  21
-rw-r--r--   kernel/fork.c   |  71
-rw-r--r--   kernel/kmod.c   | 149
-rw-r--r--   kernel/ksyms.c  |  11
-rw-r--r--   kernel/module.c |   2
-rw-r--r--   kernel/sched.c  | 124
-rw-r--r--   kernel/signal.c |  12
-rw-r--r--   kernel/sys.c    |  36
-rw-r--r--   kernel/sysctl.c |  14
10 files changed, 366 insertions(+), 78 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index ff908f68a..4e0a1d87d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -17,6 +17,10 @@ O_OBJS = sched.o dma.o fork.o exec_domain.o panic.o printk.o sys.o \
OX_OBJS += signal.o
+ifeq ($(CONFIG_KMOD),y)
+O_OBJS += kmod.o
+endif
+
ifeq ($(CONFIG_MODULES),y)
OX_OBJS += ksyms.o
endif
diff --git a/kernel/exit.c b/kernel/exit.c
index 9824f5806..2d5835ac8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -27,7 +27,6 @@
#include <asm/mmu_context.h>
extern void sem_exit (void);
-extern void kerneld_exit(void);
int getrusage(struct task_struct *, int, struct rusage *);
@@ -44,8 +43,14 @@ void release(struct task_struct * p)
charge_uid(p, -1);
nr_tasks--;
add_free_taskslot(p->tarray_ptr);
- unhash_pid(p);
- REMOVE_LINKS(p);
+ {
+ unsigned long flags;
+
+ write_lock_irqsave(&tasklist_lock, flags);
+ unhash_pid(p);
+ REMOVE_LINKS(p);
+ write_unlock_irqrestore(&tasklist_lock, flags);
+ }
release_thread(p);
current->cmin_flt += p->min_flt + p->cmin_flt;
current->cmaj_flt += p->maj_flt + p->cmaj_flt;
@@ -157,7 +162,7 @@ static inline void close_files(struct files_struct * files)
unsigned long set = files->open_fds.fds_bits[j];
i = j * __NFDBITS;
j++;
- if (i >= NR_OPEN)
+ if (i >= files->max_fds)
break;
while (set) {
if (set & 1) {
@@ -183,6 +188,13 @@ static inline void __exit_files(struct task_struct *tsk)
tsk->files = NULL;
if (!--files->count) {
close_files(files);
+ /*
+ * Free the fd array as appropriate ...
+ */
+ if (NR_OPEN * sizeof(struct file *) == PAGE_SIZE)
+ free_page((unsigned long) files->fd);
+ else
+ kfree(files->fd);
kmem_cache_free(files_cachep, files);
}
}
@@ -328,7 +340,6 @@ fake_volatile:
acct_process(code);
del_timer(&current->real_timer);
sem_exit();
- kerneld_exit();
__exit_mm(current);
#if CONFIG_AP1000
exit_msc(current);
diff --git a/kernel/fork.c b/kernel/fork.c
index 38c98b0a8..a08aa2c64 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -41,7 +41,6 @@ kmem_cache_t *mm_cachep;
kmem_cache_t *files_cachep;
struct task_struct *pidhash[PIDHASH_SZ];
-spinlock_t pidhash_lock = SPIN_LOCK_UNLOCKED;
struct task_struct **tarray_freelist = NULL;
spinlock_t taskslot_lock = SPIN_LOCK_UNLOCKED;
@@ -263,6 +262,9 @@ fail_nomem:
/*
* Allocate and initialize an mm_struct.
+ *
+ * NOTE! The mm mutex will be locked until the
+ * caller decides that all systems are go..
*/
struct mm_struct * mm_alloc(void)
{
@@ -275,7 +277,7 @@ struct mm_struct * mm_alloc(void)
mm->count = 1;
mm->map_count = 0;
mm->def_flags = 0;
- mm->mmap_sem = MUTEX;
+ mm->mmap_sem = MUTEX_LOCKED;
/*
* Leave mm->pgd set to the parent's pgd
* so that pgd_offset() is always valid.
@@ -328,6 +330,7 @@ static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
retval = dup_mmap(mm);
if (retval)
goto free_pt;
+ up(&mm->mmap_sem);
return 0;
free_mm:
@@ -375,44 +378,66 @@ static inline int copy_fdset(fd_set *dst, fd_set *src)
return __copy_fdset(dst->fds_bits, src->fds_bits);
}
-static inline int copy_files(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
- int i;
struct files_struct *oldf, *newf;
struct file **old_fds, **new_fds;
+ int size, i, error = 0;
/*
* A background process may not have any files ...
*/
oldf = current->files;
if (!oldf)
- return 0;
+ goto out;
if (clone_flags & CLONE_FILES) {
oldf->count++;
- return 0;
+ goto out;
}
+ tsk->files = NULL;
+ error = -ENOMEM;
newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
- tsk->files = newf;
if (!newf)
- return -1;
+ goto out;
+
+ /*
+ * Allocate the fd array, using get_free_page() if possible.
+ * Eventually we want to make the array size variable ...
+ */
+ size = NR_OPEN * sizeof(struct file *);
+ if (size == PAGE_SIZE)
+ new_fds = (struct file **) __get_free_page(GFP_KERNEL);
+ else
+ new_fds = (struct file **) kmalloc(size, GFP_KERNEL);
+ if (!new_fds)
+ goto out_release;
+ memset((void *) new_fds, 0, size);
newf->count = 1;
+ newf->max_fds = NR_OPEN;
+ newf->fd = new_fds;
newf->close_on_exec = oldf->close_on_exec;
- i = copy_fdset(&newf->open_fds,&oldf->open_fds);
+ i = copy_fdset(&newf->open_fds, &oldf->open_fds);
old_fds = oldf->fd;
- new_fds = newf->fd;
for (; i != 0; i--) {
struct file * f = *old_fds;
old_fds++;
*new_fds = f;
- new_fds++;
if (f)
f->f_count++;
+ new_fds++;
}
- return 0;
+ tsk->files = newf;
+ error = 0;
+out:
+ return error;
+
+out_release:
+ kmem_cache_free(files_cachep, newf);
+ goto out;
}
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
@@ -495,8 +520,15 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
p->start_time = jiffies;
p->tarray_ptr = &task[nr];
*p->tarray_ptr = p;
- SET_LINKS(p);
- hash_pid(p);
+
+ {
+ unsigned long flags;
+ write_lock_irqsave(&tasklist_lock, flags);
+ SET_LINKS(p);
+ hash_pid(p);
+ write_unlock_irqrestore(&tasklist_lock, flags);
+ }
+
nr_tasks++;
error = -ENOMEM;
@@ -553,8 +585,15 @@ bad_fork_cleanup:
if (p->binfmt && p->binfmt->module)
__MOD_DEC_USE_COUNT(p->binfmt->module);
add_free_taskslot(p->tarray_ptr);
- unhash_pid(p);
- REMOVE_LINKS(p);
+
+ {
+ unsigned long flags;
+ write_lock_irqsave(&tasklist_lock, flags);
+ unhash_pid(p);
+ REMOVE_LINKS(p);
+ write_unlock_irqrestore(&tasklist_lock, flags);
+ }
+
nr_tasks--;
bad_fork_free:
free_task_struct(p);
diff --git a/kernel/kmod.c b/kernel/kmod.c
new file mode 100644
index 000000000..a0f58d485
--- /dev/null
+++ b/kernel/kmod.c
@@ -0,0 +1,149 @@
+/*
+ kmod, the new module loader (replaces kerneld)
+ Kirk Petersen
+*/
+
+#define __KERNEL_SYSCALLS__
+
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+
+static inline _syscall1(int,delete_module,const char *,name_user)
+
+/*
+ kmod_unload_delay and modprobe_path are set via /proc/sys.
+*/
+int kmod_unload_delay = 60;
+char modprobe_path[256] = "/sbin/modprobe";
+char module_name[64] = "";
+char * argv[] = { "modprobe", "-k", NULL, NULL, };
+char * envp[] = { "HOME=/", "TERM=linux", NULL, };
+
+/*
+ kmod_queue synchronizes the kmod thread and the rest of the system
+ kmod_unload_timer is what we use to unload modules
+ after kmod_unload_delay seconds
+*/
+struct wait_queue * kmod_queue = NULL;
+struct timer_list kmod_unload_timer;
+
+/*
+ kmod_thread is the thread that does most of the work. kmod_unload and
+ request_module tell it to wake up and do work.
+*/
+int kmod_thread(void * data)
+{
+ int pid;
+
+ /*
+ Initialize basic thread information
+ */
+ current->session = 1;
+ current->pgrp = 1;
+ sprintf(current->comm, "kmod");
+ sigfillset(&current->blocked);
+
+ /*
+ This is the main kmod_thread loop. It first sleeps, then
+ handles requests from request_module or kmod_unload.
+ */
+
+ while (1) {
+ interruptible_sleep_on(&kmod_queue);
+
+ /*
+ If request_module woke us up, we should try to
+ load module_name; otherwise kmod_unload woke us up,
+ so call delete_module.
+ (if somehow both want us to do something, ignore the
+ delete_module request)
+ */
+ if (module_name[0] == '\0') {
+ delete_module(NULL);
+ } else {
+ pid = fork();
+ if (pid > 0) {
+ waitpid(pid, NULL, 0);
+ module_name[0] = '\0';
+ wake_up(&kmod_queue);
+ } else
+ if (pid == 0) {
+
+ /*
+ Call modprobe with module_name. If execve returns,
+ print out an error.
+ */
+ argv[2] = module_name;
+ execve(modprobe_path, argv, envp);
+
+ printk("kmod: failed to load module %s\n", module_name);
+ _exit(0);
+ } else {
+ printk("error, fork failed in kmod\n");
+ }
+ }
+ }
+
+ return 0; /* Never reached. */
+}
+
+/*
+ kmod_unload is the function that the kernel calls when
+ the kmod_unload_timer expires
+*/
+void kmod_unload(unsigned long x)
+{
+ /*
+ wake up the kmod thread, which does the work
+ (we can't call delete_module, as it locks the kernel and
+ we are in the bottom half of the kernel (right?))
+ once it is awake, reset the timer
+ */
+ wake_up(&kmod_queue);
+ kmod_unload_timer.expires = jiffies + (kmod_unload_delay * HZ);
+ add_timer(&kmod_unload_timer);
+}
+
+int kmod_init(void)
+{
+ printk ("Starting kmod\n");
+
+ kernel_thread(kmod_thread, NULL, 0);
+
+ kmod_unload_timer.next = NULL;
+ kmod_unload_timer.prev = NULL;
+ kmod_unload_timer.expires = jiffies + (5 * 60 * HZ);
+ kmod_unload_timer.data = 0L;
+ kmod_unload_timer.function = kmod_unload;
+ add_timer(&kmod_unload_timer);
+
+ return 0;
+}
+
+/*
+ request_module, the function that everyone calls when they need a
+ module to be loaded
+*/
+int request_module(const char * name)
+{
+ /* first, copy the name of the module into module_name */
+ /* then wake_up() the kmod daemon */
+ /* wait for the kmod daemon to finish (it will wake us up) */
+
+ /*
+ kmod_thread is sleeping, so start by copying the name of
+ the module into module_name. Once that is done, wake up
+ kmod_thread.
+ */
+ strcpy(module_name, name);
+ wake_up(&kmod_queue);
+
+ /*
+ Now that we have told kmod_thread what to do, we want to
+ go to sleep and let it do its work. It will wake us up,
+ at which point we will be done (the module will be loaded).
+ */
+ interruptible_sleep_on(&kmod_queue);
+ return 0;
+}
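
As a usage sketch (not part of this diff), a subsystem that previously sent a kerneld message now simply names the module it wants and blocks until kmod has run modprobe; the "block-major-%d" alias convention below is an assumption for illustration:

	/* e.g. on open() of a block device with no registered driver */
	char name[32];
	sprintf(name, "block-major-%d", MAJOR(dev));
	request_module(name);
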
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 869e5e5bb..7ff40d7bd 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -61,8 +61,8 @@ extern unsigned char aux_device_present, kbd_read_mask;
#if defined(CONFIG_PROC_FS)
#include <linux/proc_fs.h>
#endif
-#ifdef CONFIG_KERNELD
-#include <linux/kerneld.h>
+#ifdef CONFIG_KMOD
+#include <linux/kmod.h>
#endif
#include <asm/irq.h>
#ifdef __SMP__
@@ -91,12 +91,13 @@ __attribute__((section("__ksymtab"))) = {
#endif
+#ifdef CONFIG_KMOD
+EXPORT_SYMBOL(request_module);
+#endif
+
#ifdef CONFIG_MODULES
EXPORT_SYMBOL(get_module_symbol);
#endif
-#ifdef CONFIG_KERNELD
-EXPORT_SYMBOL(kerneld_send);
-#endif
EXPORT_SYMBOL(get_options);
#ifdef CONFIG_PCI
diff --git a/kernel/module.c b/kernel/module.c
index efee5902e..90f0bf1a2 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -324,7 +324,7 @@ sys_init_module(const char *name_user, struct module *mod_user)
dep->next_ref = d->refs;
d->refs = dep;
/* Being referenced by a dependant module counts as a
- use as far as kerneld is concerned. */
+ use as far as kmod is concerned. */
d->flags |= MOD_USED_ONCE;
}
diff --git a/kernel/sched.c b/kernel/sched.c
index f48f520ff..a86cb0413 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -155,9 +155,9 @@ static inline void move_last_runqueue(struct task_struct * p)
* The run-queue lock locks the parts that actually access
* and change the run-queues, and have to be interrupt-safe.
*/
-rwlock_t tasklist_lock = RW_LOCK_UNLOCKED;
-spinlock_t scheduler_lock = SPIN_LOCK_UNLOCKED;
-spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t scheduler_lock = SPIN_LOCK_UNLOCKED; /* should be acquired first */
+spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED; /* second */
+rwlock_t tasklist_lock = RW_LOCK_UNLOCKED; /* third */
/*
* Wake up a process. Put it on the run-queue if it's not
@@ -201,14 +201,20 @@ static void process_timeout(unsigned long __data)
*/
static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
{
+ int policy = p->policy;
int weight;
+ if (policy & SCHED_YIELD) {
+ p->policy = policy & ~SCHED_YIELD;
+ return 0;
+ }
+
/*
* Realtime process, select the first one on the
* runqueue (taking priorities within processes
* into account).
*/
- if (p->policy != SCHED_OTHER)
+ if (policy != SCHED_OTHER)
return 1000 + p->rt_priority;
/*
@@ -228,9 +234,10 @@ static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
weight += PROC_CHANGE_PENALTY;
#endif
- /* .. and a slight advantage to the current process */
- if (p == prev)
+ /* .. and a slight advantage to the current thread */
+ if (p->mm == prev->mm)
weight += 1;
+ weight += p->priority;
}
return weight;
@@ -1253,10 +1260,11 @@ asmlinkage int sys_nice(int increment)
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
+ struct task_struct *tsk = current;
+
if (pid)
- return find_task_by_pid(pid);
- else
- return current;
+ tsk = find_task_by_pid(pid);
+ return tsk;
}
static int setscheduler(pid_t pid, int policy,
@@ -1264,48 +1272,70 @@ static int setscheduler(pid_t pid, int policy,
{
struct sched_param lp;
struct task_struct *p;
+ int retval;
+ retval = -EINVAL;
if (!param || pid < 0)
- return -EINVAL;
+ goto out_nounlock;
+ retval = -EFAULT;
if (copy_from_user(&lp, param, sizeof(struct sched_param)))
- return -EFAULT;
+ goto out_nounlock;
+
+ /*
+ * We play safe to avoid deadlocks.
+ */
+ spin_lock_irq(&scheduler_lock);
+ spin_lock(&runqueue_lock);
+ read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
+
+ retval = -ESRCH;
if (!p)
- return -ESRCH;
+ goto out_unlock;
if (policy < 0)
policy = p->policy;
- else if (policy != SCHED_FIFO && policy != SCHED_RR &&
- policy != SCHED_OTHER)
- return -EINVAL;
+ else {
+ retval = -EINVAL;
+ if (policy != SCHED_FIFO && policy != SCHED_RR &&
+ policy != SCHED_OTHER)
+ goto out_unlock;
+ }
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
* priority for SCHED_OTHER is 0.
*/
+ retval = -EINVAL;
if (lp.sched_priority < 0 || lp.sched_priority > 99)
- return -EINVAL;
+ goto out_unlock;
if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
- return -EINVAL;
+ goto out_unlock;
+ retval = -EPERM;
if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
- return -EPERM;
+ goto out_unlock;
if ((current->euid != p->euid) && (current->euid != p->uid) &&
!suser())
- return -EPERM;
+ goto out_unlock;
+ retval = 0;
p->policy = policy;
p->rt_priority = lp.sched_priority;
- spin_lock(&scheduler_lock);
- spin_lock_irq(&runqueue_lock);
if (p->next_run)
move_last_runqueue(p);
- spin_unlock_irq(&runqueue_lock);
- spin_unlock(&scheduler_lock);
+
need_resched = 1;
- return 0;
+
+out_unlock:
+ read_unlock(&tasklist_lock);
+ spin_unlock(&runqueue_lock);
+ spin_unlock_irq(&scheduler_lock);
+
+out_nounlock:
+ return retval;
}
asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
@@ -1322,42 +1352,64 @@ asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
asmlinkage int sys_sched_getscheduler(pid_t pid)
{
struct task_struct *p;
+ int retval;
+ retval = -EINVAL;
if (pid < 0)
- return -EINVAL;
+ goto out_nounlock;
+ read_lock(&tasklist_lock);
+
+ retval = -ESRCH;
p = find_process_by_pid(pid);
if (!p)
- return -ESRCH;
+ goto out_unlock;
- return p->policy;
+ retval = p->policy;
+
+out_unlock:
+ read_unlock(&tasklist_lock);
+
+out_nounlock:
+ return retval;
}
asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
{
struct task_struct *p;
struct sched_param lp;
+ int retval;
+ retval = -EINVAL;
if (!param || pid < 0)
- return -EINVAL;
+ goto out_nounlock;
+ read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
+ retval = -ESRCH;
if (!p)
- return -ESRCH;
-
+ goto out_unlock;
lp.sched_priority = p->rt_priority;
- return copy_to_user(param, &lp, sizeof(struct sched_param)) ? -EFAULT : 0;
+ read_unlock(&tasklist_lock);
+
+ /*
+ * This one might sleep, we cannot do it with a spinlock held ...
+ */
+ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
+
+out_nounlock:
+ return retval;
+
+out_unlock:
+ read_unlock(&tasklist_lock);
+ return retval;
}
asmlinkage int sys_sched_yield(void)
{
- /*
- * This is not really right. We'd like to reschedule
- * just _once_ with this process having a zero count.
- */
- current->counter = 0;
spin_lock(&scheduler_lock);
spin_lock_irq(&runqueue_lock);
+ current->policy |= SCHED_YIELD;
move_last_runqueue(current);
spin_unlock_irq(&runqueue_lock);
spin_unlock(&scheduler_lock);
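
The sys_sched_yield() rework pairs with the new SCHED_YIELD test in goodness() above: the yielding task moves to the back of the run-queue and scores 0 for exactly one reschedule, instead of having its whole counter zeroed. The userspace side is unchanged; a minimal sketch of a caller:

	#include <sched.h>

	void give_up_cpu(void)
	{
		/* let any other runnable task go first, just once */
		sched_yield();
	}
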
diff --git a/kernel/signal.c b/kernel/signal.c
index 53228eb31..c313b0a11 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -438,8 +438,16 @@ kill_sl_info(int sig, struct siginfo *info, pid_t sess)
inline int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
- struct task_struct *p = find_task_by_pid(pid);
- return p ? send_sig_info(sig, info, p) : -ESRCH;
+ int error;
+ struct task_struct *p;
+
+ read_lock(&tasklist_lock);
+ p = find_task_by_pid(pid);
+ error = -ESRCH;
+ if (p)
+ error = send_sig_info(sig, info, p);
+ read_unlock(&tasklist_lock);
+ return error;
}
/*
diff --git a/kernel/sys.c b/kernel/sys.c
index 1d8356de0..e86d18c09 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -578,14 +578,16 @@ asmlinkage int sys_setpgid(pid_t pid, pid_t pgid)
if (pgid < 0)
return -EINVAL;
- if((p = find_task_by_pid(pid)) == NULL)
- return -ESRCH;
-
/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
*/
read_lock(&tasklist_lock);
+
err = -ESRCH;
+ p = find_task_by_pid(pid);
+ if (!p)
+ goto out;
+
if (p->p_pptr == current || p->p_opptr == current) {
err = -EPERM;
if (p->session != current->session)
@@ -622,12 +624,17 @@ asmlinkage int sys_getpgid(pid_t pid)
if (!pid) {
return current->pgrp;
} else {
- struct task_struct *p = find_task_by_pid(pid);
+ int retval;
+ struct task_struct *p;
- if(p)
- return p->pgrp;
- else
- return -ESRCH;
+ read_lock(&tasklist_lock);
+ p = find_task_by_pid(pid);
+
+ retval = -ESRCH;
+ if (p)
+ retval = p->pgrp;
+ read_unlock(&tasklist_lock);
+ return retval;
}
}
@@ -642,12 +649,17 @@ asmlinkage int sys_getsid(pid_t pid)
if (!pid) {
return current->session;
} else {
- struct task_struct *p = find_task_by_pid(pid);
+ int retval;
+ struct task_struct *p;
+ read_lock(&tasklist_lock);
+ p = find_task_by_pid(pid);
+
+ retval = -ESRCH;
if(p)
- return p->session;
- else
- return -ESRCH;
+ retval = p->session;
+ read_unlock(&tasklist_lock);
+ return retval;
}
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1b93ad7bd..e6864541f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -41,6 +41,10 @@ extern int console_loglevel, C_A_D, swapout_interval;
extern int bdf_prm[], bdflush_min[], bdflush_max[];
extern char binfmt_java_interpreter[], binfmt_java_appletviewer[];
extern int sysctl_overcommit_memory;
+#ifdef CONFIG_KMOD
+extern char modprobe_path[];
+extern int kmod_unload_delay;
+#endif
#ifdef __sparc__
extern char reboot_command [];
@@ -174,6 +178,12 @@ static ctl_table kern_table[] = {
0644, NULL, &proc_dointvec},
{KERN_PRINTK, "printk", &console_loglevel, 4*sizeof(int),
0644, NULL, &proc_dointvec},
+#ifdef CONFIG_KMOD
+ {KERN_MODPROBE, "modprobe", &modprobe_path, 256,
+ 0644, NULL, &proc_dostring, &sysctl_string },
+ {KERN_KMOD_UNLOAD_DELAY, "kmod_unload_delay", &kmod_unload_delay,
+ sizeof(int), 0644, NULL, &proc_dointvec},
+#endif
{0}
};
@@ -183,12 +193,14 @@ static ctl_table vm_table[] = {
{VM_SWAPOUT, "swapout_interval",
&swapout_interval, sizeof(int), 0600, NULL, &proc_dointvec_jiffies},
{VM_FREEPG, "freepages",
- &min_free_pages, 3*sizeof(int), 0600, NULL, &proc_dointvec},
+ &freepages, sizeof(freepages_t), 0600, NULL, &proc_dointvec},
{VM_BDFLUSH, "bdflush", &bdf_prm, 9*sizeof(int), 0600, NULL,
&proc_dointvec_minmax, &sysctl_intvec, NULL,
&bdflush_min, &bdflush_max},
{VM_OVERCOMMIT_MEMORY, "overcommit_memory", &sysctl_overcommit_memory,
sizeof(sysctl_overcommit_memory), 0644, NULL, &proc_dointvec},
+ {VM_BUFFERMEM, "buffermem",
+ &buffer_mem, sizeof(buffer_mem_t), 0600, NULL, &proc_dointvec},
{0}
};
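
With CONFIG_KMOD set, the two new kern_table entries appear as /proc/sys/kernel/modprobe and /proc/sys/kernel/kmod_unload_delay, so both can be tuned at run time. A minimal userspace sketch (the modprobe path below is an assumed example, not a requirement):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* point kmod at an alternative modprobe binary */
		static const char path[] = "/usr/local/sbin/modprobe\n";
		int fd = open("/proc/sys/kernel/modprobe", O_WRONLY);
		if (fd >= 0) {
			write(fd, path, sizeof(path) - 1);
			close(fd);
		}
		return 0;
	}
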