path: root/kernel
author     Ralf Baechle <ralf@linux-mips.org>   2000-04-28 01:09:25 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   2000-04-28 01:09:25 +0000
commit     b9ba7aeb165cffecdffb60aec8c3fa8d590d9ca9 (patch)
tree       42d07b0c7246ae2536a702e7c5de9e2732341116 /kernel
parent     7406b0a326f2d70ade2671c37d1beef62249db97 (diff)
Merge with 2.3.99-pre6.
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/exec_domain.c   51
-rw-r--r--   kernel/exit.c          29
-rw-r--r--   kernel/fork.c          41
-rw-r--r--   kernel/kmod.c          19
-rw-r--r--   kernel/ksyms.c         20
-rw-r--r--   kernel/panic.c          2
-rw-r--r--   kernel/pm.c            24
-rw-r--r--   kernel/ptrace.c        43
-rw-r--r--   kernel/sched.c         17
-rw-r--r--   kernel/sys.c            2
-rw-r--r--   kernel/sysctl.c        14
-rw-r--r--   kernel/timer.c          7
12 files changed, 171 insertions(+), 98 deletions(-)
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 111a3d69c..46a7443f1 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -32,9 +32,7 @@ static asmlinkage void no_lcall7(int segment, struct pt_regs * regs)
* personality set incorrectly. Check to see whether SVr4 is available,
* and use it, otherwise give the user a SEGV.
*/
- put_exec_domain(current->exec_domain);
- current->personality = PER_SVR4;
- current->exec_domain = lookup_exec_domain(current->personality);
+ set_personality(PER_SVR4);
if (current->exec_domain && current->exec_domain->handler
&& current->exec_domain->handler != no_lcall7) {
@@ -45,7 +43,7 @@ static asmlinkage void no_lcall7(int segment, struct pt_regs * regs)
send_sig(SIGSEGV, current, 1);
}
-struct exec_domain *lookup_exec_domain(unsigned long personality)
+static struct exec_domain *lookup_exec_domain(unsigned long personality)
{
unsigned long pers = personality & PER_MASK;
struct exec_domain *it;
@@ -104,28 +102,37 @@ int unregister_exec_domain(struct exec_domain *it)
return -EINVAL;
}
-asmlinkage long sys_personality(unsigned long personality)
+void __set_personality(unsigned long personality)
{
struct exec_domain *it;
- unsigned long old_personality;
- int ret;
-
- if (personality == 0xffffffff)
- return current->personality;
- ret = -EINVAL;
- lock_kernel();
it = lookup_exec_domain(personality);
- if (!it)
- goto out;
-
- old_personality = current->personality;
- put_exec_domain(current->exec_domain);
- current->personality = personality;
- current->exec_domain = it;
- ret = old_personality;
-out:
- unlock_kernel();
+ if (it) {
+ if (atomic_read(&current->fs->count) != 1) {
+ struct fs_struct *new = copy_fs_struct(current->fs);
+ if (!new)
+ return;
+ put_fs_struct(xchg(&current->fs,new));
+ }
+ /*
+ * At that point we are guaranteed to be the sole owner of
+ * current->fs.
+ */
+ current->personality = personality;
+ current->exec_domain = it;
+ set_fs_altroot();
+ put_exec_domain(current->exec_domain);
+ }
+}
+
+asmlinkage long sys_personality(unsigned long personality)
+{
+ int ret = current->personality;
+ if (personality != 0xffffffff) {
+ set_personality(personality);
+ if (current->personality != personality)
+ ret = -EINVAL;
+ }
return ret;
}
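
For context, a minimal userspace sketch (not part of the commit) of the personality(2) interface that the reworked sys_personality() above implements, assuming the glibc wrapper in <sys/personality.h>: passing 0xffffffff only queries the current value, and a personality the kernel cannot look up now comes back as -EINVAL instead of being applied.

#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
	/* 0xffffffff asks the kernel only to report the current personality
	 * (the "personality != 0xffffffff" branch above is skipped). */
	int old = personality(0xffffffff);

	if (old == -1) {
		perror("personality");
		return 1;
	}
	printf("current personality: %#x\n", old);
	return 0;
}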
diff --git a/kernel/exit.c b/kernel/exit.c
index 45f85aec3..b38b067dd 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -25,7 +25,7 @@ int getrusage(struct task_struct *, int, struct rusage *);
void release(struct task_struct * p)
{
if (p != current) {
-#ifdef __SMP__
+#ifdef CONFIG_SMP
int has_cpu;
/*
@@ -207,6 +207,25 @@ void exit_files(struct task_struct *tsk)
{
__exit_files(tsk);
}
+static inline void __put_fs_struct(struct fs_struct *fs)
+{
+ if (atomic_dec_and_test(&fs->count)) {
+ dput(fs->root);
+ mntput(fs->rootmnt);
+ dput(fs->pwd);
+ mntput(fs->pwdmnt);
+ if (fs->altroot) {
+ dput(fs->altroot);
+ mntput(fs->altrootmnt);
+ }
+ kfree(fs);
+ }
+}
+
+void put_fs_struct(struct fs_struct *fs)
+{
+ __put_fs_struct(fs);
+}
static inline void __exit_fs(struct task_struct *tsk)
{
@@ -214,13 +233,7 @@ static inline void __exit_fs(struct task_struct *tsk)
if (fs) {
tsk->fs = NULL;
- if (atomic_dec_and_test(&fs->count)) {
- dput(fs->root);
- mntput(fs->rootmnt);
- dput(fs->pwd);
- mntput(fs->pwdmnt);
- kfree(fs);
- }
+ __put_fs_struct(fs);
}
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 6eca792d9..fa63452e1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -11,6 +11,7 @@
* management can be a bitch. See 'mm/mm.c': 'copy_page_tables()'
*/
+#include <linux/config.h>
#include <linux/malloc.h>
#include <linux/init.h>
#include <linux/unistd.h>
@@ -98,7 +99,7 @@ static inline struct user_struct *uid_hash_find(unsigned short uid, unsigned int
* the common case (not freeing anything) without having
* any locking.
*/
-#ifdef __SMP__
+#ifdef CONFIG_SMP
#define uid_hash_free(up) (!atomic_read(&(up)->count))
#else
#define uid_hash_free(up) (1)
@@ -242,6 +243,8 @@ static inline int dup_mmap(struct mm_struct * mm)
struct file *file;
retval = -ENOMEM;
+ if(mpnt->vm_flags & VM_DONTCOPY)
+ continue;
tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!tmp)
goto fail_nomem;
@@ -419,21 +422,41 @@ fail_nomem:
return retval;
}
+static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
+{
+ struct fs_struct *fs = kmalloc(sizeof(*old), GFP_KERNEL);
+ if (fs) {
+ atomic_set(&fs->count, 1);
+ fs->umask = old->umask;
+ fs->rootmnt = mntget(old->rootmnt);
+ fs->root = dget(old->root);
+ fs->pwdmnt = mntget(old->pwdmnt);
+ fs->pwd = dget(old->pwd);
+ if (old->altroot) {
+ fs->altrootmnt = mntget(old->altrootmnt);
+ fs->altroot = dget(old->altroot);
+ } else {
+ fs->altrootmnt = NULL;
+ fs->altroot = NULL;
+ }
+ }
+ return fs;
+}
+
+struct fs_struct *copy_fs_struct(struct fs_struct *old)
+{
+ return __copy_fs_struct(old);
+}
+
static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
if (clone_flags & CLONE_FS) {
atomic_inc(&current->fs->count);
return 0;
}
- tsk->fs = kmalloc(sizeof(*tsk->fs), GFP_KERNEL);
+ tsk->fs = __copy_fs_struct(current->fs);
if (!tsk->fs)
return -1;
- atomic_set(&tsk->fs->count, 1);
- tsk->fs->umask = current->fs->umask;
- tsk->fs->root = dget(current->fs->root);
- tsk->fs->pwd = dget(current->fs->pwd);
- tsk->fs->rootmnt = mntget(current->fs->rootmnt);
- tsk->fs->pwdmnt = mntget(current->fs->pwdmnt);
return 0;
}
@@ -673,7 +696,7 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
p->tty_old_pgrp = 0;
p->times.tms_utime = p->times.tms_stime = 0;
p->times.tms_cutime = p->times.tms_cstime = 0;
-#ifdef __SMP__
+#ifdef CONFIG_SMP
{
int i;
p->has_cpu = 0;
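
The copy_fs() rework above funnels the non-CLONE_FS case through __copy_fs_struct(), which now also carries the altroot reference. A hedged userspace sketch of what the flag means in practice (standard clone(2) usage, not from this commit): with CLONE_FS the child shares the parent's fs_struct, so a chdir() in the child is visible to the parent; drop the flag and copy_fs() hands the child a private copy.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

#define STACK_SIZE 65536

static int child_fn(void *arg)
{
	/* With CLONE_FS this chdir() is observed by the parent, because both
	 * tasks point at one shared fs_struct; without the flag, copy_fs()
	 * gives the child its own copy and the parent's cwd is untouched. */
	if (chdir("/tmp") != 0)
		return 1;
	return 0;
}

int main(void)
{
	char cwd[4096];
	char *stack = malloc(STACK_SIZE);

	if (!stack)
		return 1;
	/* the stack grows down on i386/x86-64, so pass the top of the buffer */
	if (clone(child_fn, stack + STACK_SIZE, CLONE_FS | SIGCHLD, NULL) == -1) {
		perror("clone");
		return 1;
	}
	wait(NULL);
	if (getcwd(cwd, sizeof(cwd)))
		printf("parent cwd after child's chdir: %s\n", cwd);
	free(stack);
	return 0;
}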
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 75d31eadf..bbfaf2992 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -52,17 +52,18 @@ use_init_fs_context(void)
lock_kernel();
our_fs = current->fs;
- dput(our_fs->root);
- dput(our_fs->pwd);
- mntput(our_fs->rootmnt);
- mntput(our_fs->pwdmnt);
-
init_fs = init_task.fs;
our_fs->umask = init_fs->umask;
- our_fs->root = dget(init_fs->root);
- our_fs->pwd = dget(init_fs->pwd);
- our_fs->rootmnt = mntget(init_fs->rootmnt);
- our_fs->pwdmnt = mntget(init_fs->pwdmnt);
+ set_fs_root(our_fs, init_fs->rootmnt, init_fs->root);
+ set_fs_pwd(our_fs, init_fs->pwdmnt, init_fs->pwd);
+ if (our_fs->altroot) {
+ struct vfsmount *mnt = our_fs->altrootmnt;
+ struct dentry *dentry = our_fs->altroot;
+ our_fs->altrootmnt = NULL;
+ our_fs->altroot = NULL;
+ dput(dentry);
+ mntput(mnt);
+ }
unlock_kernel();
}
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 89136a218..ae6760fd2 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -52,6 +52,11 @@
#include <linux/kmod.h>
#endif
+#ifdef CONFIG_BLK_DEV_LVM_MODULE
+extern void (*lvm_hd_name_ptr) ( char*, int);
+EXPORT_SYMBOL(lvm_hd_name_ptr);
+#endif
+
extern int console_loglevel;
extern void set_device_ro(kdev_t dev,int flag);
#if !defined(CONFIG_NFSD) && defined(CONFIG_NFSD_MODULE)
@@ -179,6 +184,7 @@ EXPORT_SYMBOL(get_hardblocksize);
EXPORT_SYMBOL(set_blocksize);
EXPORT_SYMBOL(getblk);
EXPORT_SYMBOL(bdget);
+EXPORT_SYMBOL(bdput);
EXPORT_SYMBOL(bread);
EXPORT_SYMBOL(breada);
EXPORT_SYMBOL(__brelse);
@@ -189,6 +195,7 @@ EXPORT_SYMBOL(___wait_on_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_prepare_write);
+EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(cont_prepare_write);
EXPORT_SYMBOL(generic_commit_write);
EXPORT_SYMBOL(generic_block_bmap);
@@ -235,15 +242,17 @@ EXPORT_SYMBOL(vfs_follow_link);
EXPORT_SYMBOL(page_readlink);
EXPORT_SYMBOL(page_follow_link);
EXPORT_SYMBOL(page_symlink_inode_operations);
+EXPORT_SYMBOL(block_fsync);
EXPORT_SYMBOL(block_symlink);
EXPORT_SYMBOL(vfs_readdir);
-/* for stackable file systems (lofs, wrapfs, etc.) */
-EXPORT_SYMBOL(add_to_page_cache);
+/* for stackable file systems (lofs, wrapfs, cryptfs, etc.) */
+EXPORT_SYMBOL(default_llseek);
+EXPORT_SYMBOL(dentry_open);
EXPORT_SYMBOL(filemap_nopage);
EXPORT_SYMBOL(filemap_swapout);
EXPORT_SYMBOL(filemap_sync);
-EXPORT_SYMBOL(remove_inode_page);
+EXPORT_SYMBOL(lock_page);
#if !defined(CONFIG_NFSD) && defined(CONFIG_NFSD_MODULE)
EXPORT_SYMBOL(do_nfsservctl);
@@ -272,6 +281,7 @@ EXPORT_SYMBOL(bmap);
EXPORT_SYMBOL(sync_dev);
EXPORT_SYMBOL(devfs_register_partitions);
EXPORT_SYMBOL(blkdev_open);
+EXPORT_SYMBOL(blkdev_close);
EXPORT_SYMBOL(blkdev_get);
EXPORT_SYMBOL(blkdev_put);
EXPORT_SYMBOL(ioctl_by_bdev);
@@ -307,11 +317,12 @@ EXPORT_SYMBOL(search_binary_handler);
EXPORT_SYMBOL(prepare_binprm);
EXPORT_SYMBOL(compute_creds);
EXPORT_SYMBOL(remove_arg_zero);
+EXPORT_SYMBOL(set_binfmt);
/* execution environment registration */
-EXPORT_SYMBOL(lookup_exec_domain);
EXPORT_SYMBOL(register_exec_domain);
EXPORT_SYMBOL(unregister_exec_domain);
+EXPORT_SYMBOL(__set_personality);
/* sysctl table registration */
EXPORT_SYMBOL(register_sysctl_table);
@@ -398,6 +409,7 @@ EXPORT_SYMBOL(iomem_resource);
/* process management */
EXPORT_SYMBOL(__wake_up);
+EXPORT_SYMBOL(wake_up_process);
EXPORT_SYMBOL(sleep_on);
EXPORT_SYMBOL(sleep_on_timeout);
EXPORT_SYMBOL(interruptible_sleep_on);
diff --git a/kernel/panic.c b/kernel/panic.c
index 920e4a1a7..8e53e13b4 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -65,7 +65,7 @@ NORET_TYPE void panic(const char * fmt, ...)
unblank_console();
-#ifdef __SMP__
+#ifdef CONFIG_SMP
smp_send_stop();
#endif
diff --git a/kernel/pm.c b/kernel/pm.c
index 369d9b954..0f3984bea 100644
--- a/kernel/pm.c
+++ b/kernel/pm.c
@@ -30,13 +30,13 @@ static LIST_HEAD(pm_devs);
/**
* pm_register - register a device with power management
- * @type: The device type
- * @id: Device ID
- * @callback: Callback function
+ * @type: device type
+ * @id: device ID
+ * @callback: callback function
*
* Add a device to the list of devices that wish to be notified about
- * power management events. A pm_dev structure is returnd on success,
- * on failure the return is NULL
+ * power management events. A &pm_dev structure is returned on success,
+ * on failure the return is %NULL.
*/
struct pm_dev *pm_register(pm_dev_t type,
@@ -113,8 +113,8 @@ void pm_unregister_all(pm_callback callback)
* @data: data for the callback
*
* Issue a power management request to a given device. The
- * PM_SUSPEND and PM_RESUME events are handled specially. The
- * data field must hold the intented next state. No call is made
+ * %PM_SUSPEND and %PM_RESUME events are handled specially. The
+ * data field must hold the intended next state. No call is made
* if the state matches.
*
* BUGS: what stops two power management requests occuring in parallel
@@ -171,12 +171,12 @@ static void pm_undo_all(struct pm_dev *last)
}
/**
- * pm_send - send request to all managed device
+ * pm_send_all - send request to all managed devices
* @rqst: power management request
* @data: data for the callback
*
* Issue a power management request to a all devices. The
- * PM_SUSPEND events are handled specially. Any device is
+ * %PM_SUSPEND events are handled specially. Any device is
* permitted to fail a suspend by returning a non zero (error)
* value from its callback function. If any device vetoes a
* suspend request then all other devices that have suspended
@@ -214,14 +214,14 @@ int pm_send_all(pm_request_t rqst, void *data)
/**
* pm_find - find a device
* @type: type of device
- * @from: Where to start looking
+ * @from: where to start looking
*
* Scan the power management list for devices of a specific type. The
* return value for a matching device may be passed to further calls
- * to this function to find further matches. A NULL indicates the end
+ * to this function to find further matches. A %NULL indicates the end
* of the list.
*
- * To search from the beginning pass NULL as the from value.
+ * To search from the beginning pass %NULL as the @from value.
*/
struct pm_dev *pm_find(pm_dev_t type, struct pm_dev *from)
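
The kernel-doc touched up above describes the pm_register()/pm_send_all()/pm_find() interface. A rough sketch of how a driver of that era might hook into it; the mydev names are hypothetical, and the PM_SYS_DEV/PM_SYS_UNKNOWN constants are assumed to come from <linux/pm.h> as in 2.3/2.4 kernels.

#include <linux/module.h>
#include <linux/pm.h>

static struct pm_dev *mydev_pm;		/* hypothetical device, illustration only */

static int mydev_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
{
	switch (rqst) {
	case PM_SUSPEND:
		/* "data" carries the intended next state; quiesce the hardware.
		 * Returning non-zero here vetoes the suspend (see pm_send_all). */
		break;
	case PM_RESUME:
		/* bring the hardware back to its running state */
		break;
	default:
		break;
	}
	return 0;
}

int init_module(void)
{
	mydev_pm = pm_register(PM_SYS_DEV, PM_SYS_UNKNOWN, mydev_pm_callback);
	return 0;
}

void cleanup_module(void)
{
	if (mydev_pm)
		pm_unregister(mydev_pm);
}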
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index a87858804..162c7083b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
+#include <linux/smp_lock.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -18,7 +19,7 @@
/*
* Access another process' address space, one page at a time.
*/
-static int access_one_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr, void *buf, int len, int write)
+static int access_one_page(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long addr, void *buf, int len, int write)
{
pgd_t * pgdir;
pmd_t * pgmiddle;
@@ -65,7 +66,7 @@ repeat:
fault_in_page:
/* -1: out of memory. 0 - unmapped page */
- if (handle_mm_fault(tsk, vma, addr, write) > 0)
+ if (handle_mm_fault(mm, vma, addr, write) > 0)
goto repeat;
return 0;
@@ -78,18 +79,10 @@ bad_pmd:
return 0;
}
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+static int access_mm(struct mm_struct *mm, struct vm_area_struct * vma, unsigned long addr, void *buf, int len, int write)
{
- int copied;
- struct vm_area_struct * vma;
+ int copied = 0;
- down(&tsk->mm->mmap_sem);
- vma = find_extend_vma(tsk, addr);
- if (!vma) {
- up(&tsk->mm->mmap_sem);
- return 0;
- }
- copied = 0;
for (;;) {
unsigned long offset = addr & ~PAGE_MASK;
int this_len = PAGE_SIZE - offset;
@@ -97,7 +90,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
if (this_len > len)
this_len = len;
- retval = access_one_page(tsk, vma, addr, buf, this_len, write);
+ retval = access_one_page(mm, vma, addr, buf, this_len, write);
copied += retval;
if (retval != this_len)
break;
@@ -118,7 +111,29 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
vma = vma->vm_next;
}
- up(&tsk->mm->mmap_sem);
+ return copied;
+}
+
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+{
+ int copied;
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
+
+ /* Worry about races with exit() */
+ lock_kernel();
+ mm = tsk->mm;
+ atomic_inc(&mm->mm_users);
+ unlock_kernel();
+
+ down(&mm->mmap_sem);
+ vma = find_extend_vma(mm, addr);
+ copied = 0;
+ if (vma)
+ copied = access_mm(mm, vma, addr, buf, len, write);
+
+ up(&mm->mmap_sem);
+ mmput(mm);
return copied;
}
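
access_process_vm(), reorganized above to pin the target mm before walking it, is where the ptrace(2) peek/poke requests end up on i386 of this era. A hedged userspace sketch of that path, not part of the commit; 0x08048000 is only the classic i386 ELF load address, used as an illustrative target.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	long word;
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(1);
	}
	wait(NULL);		/* child stops at the exec because of PTRACE_TRACEME */

	/* Each word fetched here reaches access_process_vm() on the kernel side. */
	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, (void *)0x08048000, NULL);
	if (errno)
		perror("PTRACE_PEEKDATA");
	else
		printf("peeked word: %#lx\n", word);

	ptrace(PTRACE_CONT, pid, NULL, NULL);
	wait(NULL);
	return 0;
}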
diff --git a/kernel/sched.c b/kernel/sched.c
index d1d49df5c..c846e4160 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -19,6 +19,7 @@
* current-task
*/
+#include <linux/config.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
@@ -80,7 +81,7 @@ static union {
struct kernel_stat kstat = { 0 };
-#ifdef __SMP__
+#ifdef CONFIG_SMP
#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
#define can_schedule(p) (!(p)->has_cpu)
@@ -133,7 +134,7 @@ static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struc
if (!weight)
goto out;
-#ifdef __SMP__
+#ifdef CONFIG_SMP
/* Give a largish advantage to the same processor... */
/* (this is equivalent to penalizing other processors) */
if (p->processor == this_cpu)
@@ -184,7 +185,7 @@ static inline int preemption_goodness(struct task_struct * prev, struct task_str
*/
static inline void reschedule_idle(struct task_struct * p, unsigned long flags)
{
-#ifdef __SMP__
+#ifdef CONFIG_SMP
int this_cpu = smp_processor_id(), target_cpu;
struct task_struct *tsk;
int cpu, best_cpu, i;
@@ -406,7 +407,7 @@ signed long schedule_timeout(signed long timeout)
static inline void __schedule_tail(struct task_struct *prev)
{
current->need_resched |= prev->need_resched;
-#ifdef __SMP__
+#ifdef CONFIG_SMP
if ((prev->state == TASK_RUNNING) &&
(prev != idle_task(smp_processor_id()))) {
unsigned long flags;
@@ -416,7 +417,7 @@ static inline void __schedule_tail(struct task_struct *prev)
}
wmb();
prev->has_cpu = 0;
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
}
void schedule_tail(struct task_struct *prev)
@@ -516,7 +517,7 @@ still_running_back:
* sched_data.
*/
sched_data->curr = next;
-#ifdef __SMP__
+#ifdef CONFIG_SMP
next->has_cpu = 1;
next->processor = this_cpu;
#endif
@@ -525,7 +526,7 @@ still_running_back:
if (prev == next)
goto same_process;
-#ifdef __SMP__
+#ifdef CONFIG_SMP
/*
* maintain the per-process 'average timeslice' value.
* (this has to be recalculated even if we reschedule to
@@ -554,7 +555,7 @@ still_running_back:
* rescheduled during switch_to().
*/
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
kstat.context_swtch++;
/*
diff --git a/kernel/sys.c b/kernel/sys.c
index 6e30f0347..0c78f80b4 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -934,7 +934,7 @@ asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit *rlim)
? -EFAULT : 0;
}
-#if !defined(__ia64__)
+#if !defined(__ia64__) && !defined(__s390__)
/*
* Back compatibility for getrlimit. Needed for some apps.
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 35883ed4c..5137a8e90 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -228,7 +228,7 @@ static ctl_table kern_table[] = {
static ctl_table vm_table[] = {
{VM_FREEPG, "freepages",
&freepages, sizeof(freepages_t), 0644, NULL, &proc_dointvec},
- {VM_BDFLUSH, "bdflush", &bdf_prm, 9*sizeof(int), 0600, NULL,
+ {VM_BDFLUSH, "bdflush", &bdf_prm, 9*sizeof(int), 0644, NULL,
&proc_dointvec_minmax, &sysctl_intvec, NULL,
&bdflush_min, &bdflush_max},
{VM_OVERCOMMIT_MEMORY, "overcommit_memory", &sysctl_overcommit_memory,
@@ -240,9 +240,9 @@ static ctl_table vm_table[] = {
{VM_PAGERDAEMON, "kswapd",
&pager_daemon, sizeof(pager_daemon_t), 0644, NULL, &proc_dointvec},
{VM_PGT_CACHE, "pagetable_cache",
- &pgt_cache_water, 2*sizeof(int), 0600, NULL, &proc_dointvec},
+ &pgt_cache_water, 2*sizeof(int), 0644, NULL, &proc_dointvec},
{VM_PAGE_CLUSTER, "page-cluster",
- &page_cluster, sizeof(int), 0600, NULL, &proc_dointvec},
+ &page_cluster, sizeof(int), 0644, NULL, &proc_dointvec},
{0}
};
@@ -320,6 +320,7 @@ int do_sysctl(int *name, int nlen, void *oldval, size_t *oldlenp,
kfree(context);
if (error != -ENOTDIR)
return error;
+ tmp = tmp->next;
} while (tmp != &root_table_header.ctl_entry);
return -ENOTDIR;
}
@@ -365,14 +366,13 @@ static int parse_table(int *name, int nlen,
void *newval, size_t newlen,
ctl_table *table, void **context)
{
+ int n;
repeat:
if (!nlen)
return -ENOTDIR;
-
+ if (get_user(n, name))
+ return -EFAULT;
for ( ; table->ctl_name; table++) {
- int n;
- if (get_user(n, name))
- return -EFAULT;
if (n == table->ctl_name || table->ctl_name == CTL_ANY) {
int error;
if (table->child) {
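
The sysctl.c hunks above fix the binary-sysctl table walk (do_sysctl() now advances tmp to the next registered header, and parse_table() reads the name element once per level instead of once per entry) and loosen a few vm entries from 0600 to 0644. A hedged sketch of reading one of them through the era's glibc sysctl() wrapper; CTL_VM and VM_PAGE_CLUSTER are assumed to come from <linux/sysctl.h>.

#include <stdio.h>
#include <sys/sysctl.h>		/* era glibc wrapper; pulls in <linux/sysctl.h> */

int main(void)
{
	int name[] = { CTL_VM, VM_PAGE_CLUSTER };
	int page_cluster;
	size_t len = sizeof(page_cluster);

	/* The name vector {CTL_VM, VM_PAGE_CLUSTER} is what parse_table()
	 * above resolves; the same value is also visible as
	 * /proc/sys/vm/page-cluster, now mode 0644. */
	if (sysctl(name, 2, &page_cluster, &len, NULL, 0) == -1) {
		perror("sysctl");
		return 1;
	}
	printf("vm.page-cluster = %d\n", page_cluster);
	return 0;
}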
diff --git a/kernel/timer.c b/kernel/timer.c
index f087d239f..b28c69123 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -15,6 +15,7 @@
* 1999-03-10 Improved NTP compatibility by Ulrich Windl
*/
+#include <linux/config.h>
#include <linux/mm.h>
#include <linux/timex.h>
#include <linux/delay.h>
@@ -211,7 +212,7 @@ int del_timer(struct timer_list * timer)
return ret;
}
-#ifdef __SMP__
+#ifdef CONFIG_SMP
/*
* SMP specific function to delete periodic timer.
* Caller must disable by some means restarting the timer
@@ -564,7 +565,7 @@ static void update_process_times(unsigned long ticks, unsigned long system)
/*
* SMP does this on a per-CPU basis elsewhere
*/
-#ifndef __SMP__
+#ifndef CONFIG_SMP
struct task_struct * p = current;
unsigned long user = ticks - system;
if (p->pid) {
@@ -748,7 +749,7 @@ asmlinkage long sys_getppid(void)
parent = me->p_opptr;
for (;;) {
pid = parent->pid;
-#if __SMP__
+#if CONFIG_SMP
{
struct task_struct *old = parent;
mb();