author     Ralf Baechle <ralf@linux-mips.org>  2000-05-12 21:05:59 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  2000-05-12 21:05:59 +0000
commit     ba2dacab305c598cd4c34a604f8e276bf5bab5ff (patch)
tree       78670a0139bf4d5ace617b29b7eba82bbc74d602 /kernel
parent     b77bf69998121e689c5e86cc5630d39a0a9ee6ca (diff)
Merge with Linux 2.3.99-pre7 and various other bits.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exec_domain.c    11
-rw-r--r--  kernel/exit.c           49
-rw-r--r--  kernel/fork.c            6
-rw-r--r--  kernel/ksyms.c          22
-rw-r--r--  kernel/ptrace.c          9
-rw-r--r--  kernel/sched.c           4
-rw-r--r--  kernel/signal.c          5
-rw-r--r--  kernel/timer.c         115
8 files changed, 134 insertions, 87 deletions
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 46a7443f1..4060c802a 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -110,9 +110,16 @@ void __set_personality(unsigned long personality)
if (it) {
if (atomic_read(&current->fs->count) != 1) {
struct fs_struct *new = copy_fs_struct(current->fs);
- if (!new)
+ struct fs_struct *old;
+ if (!new) {
+ put_exec_domain(it);
return;
- put_fs_struct(xchg(&current->fs,new));
+ }
+ task_lock(current);
+ old = current->fs;
+ current->fs = new;
+ task_unlock(current);
+ put_fs_struct(old);
}
/*
* At that point we are guaranteed to be the sole owner of
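
Note: the exec_domain.c hunk replaces an unlocked xchg() on current->fs with an assignment made under task_lock()/task_unlock(), and also drops the exec-domain reference on the allocation-failure path. A minimal sketch of the locking pattern, using only names visible in the patch (switch_fs() itself is an illustrative helper, not part of the patch):

	/* Swap a task-owned pointer under the per-task lock so that
	 * readers who also take task_lock() never see a torn update;
	 * the old object is released only after the lock is dropped. */
	static void switch_fs(struct task_struct *tsk, struct fs_struct *new)
	{
		struct fs_struct *old;

		task_lock(tsk);
		old = tsk->fs;
		tsk->fs = new;
		task_unlock(tsk);
		put_fs_struct(old);
	}
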
diff --git a/kernel/exit.c b/kernel/exit.c
index b38b067dd..8617b9d36 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -182,24 +182,32 @@ static inline void close_files(struct files_struct * files)
extern kmem_cache_t *files_cachep;
+void put_files_struct(struct files_struct *files)
+{
+ if (atomic_dec_and_test(&files->count)) {
+ close_files(files);
+ /*
+ * Free the fd and fdset arrays if we expanded them.
+ */
+ if (files->fd != &files->fd_array[0])
+ free_fd_array(files->fd, files->max_fds);
+ if (files->max_fdset > __FD_SETSIZE) {
+ free_fdset(files->open_fds, files->max_fdset);
+ free_fdset(files->close_on_exec, files->max_fdset);
+ }
+ kmem_cache_free(files_cachep, files);
+ }
+}
+
static inline void __exit_files(struct task_struct *tsk)
{
- struct files_struct * files = xchg(&tsk->files, NULL);
+ struct files_struct * files = tsk->files;
if (files) {
- if (atomic_dec_and_test(&files->count)) {
- close_files(files);
- /*
- * Free the fd and fdset arrays if we expanded them.
- */
- if (files->fd != &files->fd_array[0])
- free_fd_array(files->fd, files->max_fds);
- if (files->max_fdset > __FD_SETSIZE) {
- free_fdset(files->open_fds, files->max_fdset);
- free_fdset(files->close_on_exec, files->max_fdset);
- }
- kmem_cache_free(files_cachep, files);
- }
+ task_lock(tsk);
+ tsk->files = NULL;
+ task_unlock(tsk);
+ put_files_struct(files);
}
}
@@ -232,7 +240,9 @@ static inline void __exit_fs(struct task_struct *tsk)
struct fs_struct * fs = tsk->fs;
if (fs) {
+ task_lock(tsk);
tsk->fs = NULL;
+ task_unlock(tsk);
__put_fs_struct(fs);
}
}
@@ -247,11 +257,9 @@ static inline void __exit_sighand(struct task_struct *tsk)
struct signal_struct * sig = tsk->sig;
if (sig) {
- unsigned long flags;
-
- spin_lock_irqsave(&tsk->sigmask_lock, flags);
+ spin_lock_irq(&tsk->sigmask_lock);
tsk->sig = NULL;
- spin_unlock_irqrestore(&tsk->sigmask_lock, flags);
+ spin_unlock_irq(&tsk->sigmask_lock);
if (atomic_dec_and_test(&sig->count))
kfree(sig);
}
@@ -302,7 +310,10 @@ static inline void __exit_mm(struct task_struct * tsk)
atomic_inc(&mm->mm_count);
mm_release();
if (mm != tsk->active_mm) BUG();
+ /* more a memory barrier than a real lock */
+ task_lock(tsk);
tsk->mm = NULL;
+ task_unlock(tsk);
enter_lazy_tlb(mm, current, smp_processor_id());
mmput(mm);
}
@@ -434,12 +445,10 @@ fake_volatile:
__exit_files(tsk);
__exit_fs(tsk);
__exit_sighand(tsk);
- task_lock(tsk);
exit_thread();
tsk->state = TASK_ZOMBIE;
tsk->exit_code = code;
exit_notify();
- task_unlock(tsk);
put_exec_domain(tsk->exec_domain);
if (tsk->binfmt && tsk->binfmt->module)
__MOD_DEC_USE_COUNT(tsk->binfmt->module);
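
Note: the exit.c changes pull the final-release logic out of __exit_files() into a new helper, put_files_struct(), and detach tsk->files and tsk->fs under task_lock() instead of relying on xchg(). The release-on-last-reference idiom it uses, reduced to a sketch (struct my_object and release_resources() are illustrative, not from the patch):

	void put_object(struct my_object *obj)
	{
		/* atomic_dec_and_test() returns true only for the caller
		 * that drops the final reference, so exactly one path
		 * performs the teardown and the free. */
		if (atomic_dec_and_test(&obj->count)) {
			release_resources(obj);
			kfree(obj);
		}
	}
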
diff --git a/kernel/fork.c b/kernel/fork.c
index fa63452e1..424b2ed55 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -6,9 +6,9 @@
/*
* 'fork.c' contains the help-routines for the 'fork' system call
- * (see also system_call.s).
+ * (see also entry.S and others).
* Fork is rather simple, once you get the hang of it, but the memory
- * management can be a bitch. See 'mm/mm.c': 'copy_page_tables()'
+ * management can be a bitch. See 'mm/memory.c': 'copy_page_tables()'
*/
#include <linux/config.h>
@@ -680,7 +680,7 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
p->p_cptr = NULL;
init_waitqueue_head(&p->wait_chldexit);
p->vfork_sem = NULL;
- sema_init(&p->exit_sem, 1);
+ spin_lock_init(&p->alloc_lock);
p->sigpending = 0;
sigemptyset(&p->signal);
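
Note: fork.c now initializes the new per-task spinlock, p->alloc_lock, which is what the task_lock()/task_unlock() calls in the other hunks operate on; the old per-task exit_sem semaphore goes away. Presumably the helpers are defined along these lines in <linux/sched.h> (an assumption — the definition is not part of this diffstat):

	static inline void task_lock(struct task_struct *p)
	{
		spin_lock(&p->alloc_lock);
	}

	static inline void task_unlock(struct task_struct *p)
	{
		spin_unlock(&p->alloc_lock);
	}
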
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index ae6760fd2..af22705ec 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -85,6 +85,7 @@ EXPORT_SYMBOL(exec_usermodehelper);
#ifdef CONFIG_MODULES
EXPORT_SYMBOL(get_module_symbol);
+EXPORT_SYMBOL(try_inc_mod_count);
#endif
EXPORT_SYMBOL(get_option);
EXPORT_SYMBOL(get_options);
@@ -137,18 +138,20 @@ EXPORT_SYMBOL(in_group_p);
EXPORT_SYMBOL(update_atime);
EXPORT_SYMBOL(get_super);
EXPORT_SYMBOL(get_empty_super);
-EXPORT_SYMBOL(remove_vfsmnt);
EXPORT_SYMBOL(getname);
EXPORT_SYMBOL(_fput);
EXPORT_SYMBOL(igrab);
EXPORT_SYMBOL(iunique);
EXPORT_SYMBOL(iget4);
EXPORT_SYMBOL(iput);
-EXPORT_SYMBOL(__namei);
-EXPORT_SYMBOL(lookup_dentry);
-EXPORT_SYMBOL(walk_init);
-EXPORT_SYMBOL(walk_name);
+EXPORT_SYMBOL(follow_up);
+EXPORT_SYMBOL(follow_down);
+EXPORT_SYMBOL(path_init);
+EXPORT_SYMBOL(path_walk);
+EXPORT_SYMBOL(path_release);
+EXPORT_SYMBOL(__user_walk);
EXPORT_SYMBOL(lookup_one);
+EXPORT_SYMBOL(lookup_hash);
EXPORT_SYMBOL(sys_close);
EXPORT_SYMBOL(d_alloc_root);
EXPORT_SYMBOL(d_delete);
@@ -214,7 +217,6 @@ EXPORT_SYMBOL(posix_block_lock);
EXPORT_SYMBOL(posix_unblock_lock);
EXPORT_SYMBOL(locks_mandatory_area);
EXPORT_SYMBOL(dput);
-EXPORT_SYMBOL(is_root_busy);
EXPORT_SYMBOL(have_submounts);
EXPORT_SYMBOL(prune_dcache);
EXPORT_SYMBOL(shrink_dcache_sb);
@@ -230,6 +232,7 @@ EXPORT_SYMBOL(vfs_link);
EXPORT_SYMBOL(vfs_rmdir);
EXPORT_SYMBOL(vfs_unlink);
EXPORT_SYMBOL(vfs_rename);
+EXPORT_SYMBOL(vfs_statfs);
EXPORT_SYMBOL(generic_read_dir);
EXPORT_SYMBOL(__pollwait);
EXPORT_SYMBOL(ROOT_DEV);
@@ -309,6 +312,9 @@ EXPORT_SYMBOL(console_loglevel);
/* filesystem registration */
EXPORT_SYMBOL(register_filesystem);
EXPORT_SYMBOL(unregister_filesystem);
+EXPORT_SYMBOL(kern_mount);
+EXPORT_SYMBOL(kern_umount);
+EXPORT_SYMBOL(may_umount);
/* executable format registration */
EXPORT_SYMBOL(register_binfmt);
@@ -351,7 +357,7 @@ EXPORT_SYMBOL(autoirq_setup);
EXPORT_SYMBOL(autoirq_report);
#endif
-#ifdef __SMP__
+#ifdef CONFIG_SMP
EXPORT_SYMBOL(del_timer_sync);
#endif
EXPORT_SYMBOL(mod_timer);
@@ -361,7 +367,7 @@ EXPORT_SYMBOL(tq_scheduler);
EXPORT_SYMBOL(timer_active);
EXPORT_SYMBOL(timer_table);
-#ifdef __SMP__
+#ifdef CONFIG_SMP
/* Various random spinlocks we want to export */
EXPORT_SYMBOL(tqueue_lock);
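
Note: the ksyms.c hunk tracks the VFS switch from the old lookup_dentry()/walk_* interface to the path_init()/path_walk()/path_release() trio now exported to modules. A rough caller-side sketch of the newly exported interface; the flags and the nameidata fields are assumptions based on the 2.3.99-era VFS, not taken from this patch:

	#include <linux/fs.h>

	static int example_lookup(const char *pathname)
	{
		struct nameidata nd;
		int err = 0;

		if (path_init(pathname, LOOKUP_FOLLOW | LOOKUP_POSITIVE, &nd))
			err = path_walk(pathname, &nd);
		if (err)
			return err;
		/* nd.dentry and nd.mnt now pin the looked-up object */
		path_release(&nd);
		return 0;
	}
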
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 162c7083b..e009bca35 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -121,10 +121,13 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
struct vm_area_struct * vma;
/* Worry about races with exit() */
- lock_kernel();
+ task_lock(tsk);
mm = tsk->mm;
- atomic_inc(&mm->mm_users);
- unlock_kernel();
+ if (mm)
+ atomic_inc(&mm->mm_users);
+ task_unlock(tsk);
+ if (!mm)
+ return 0;
down(&mm->mmap_sem);
vma = find_extend_vma(mm, addr);
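
Note: in ptrace.c, access_process_vm() stops taking the big kernel lock to guard against exit() and instead pins the target mm under task_lock(), bailing out cleanly if the task has already dropped its mm. The pattern as the patch applies it, reduced to a helper (grab_task_mm() is illustrative):

	static struct mm_struct *grab_task_mm(struct task_struct *tsk)
	{
		struct mm_struct *mm;

		task_lock(tsk);
		mm = tsk->mm;
		if (mm)
			atomic_inc(&mm->mm_users);	/* pin it before unlocking */
		task_unlock(tsk);
		return mm;	/* NULL means the task is already exiting */
	}
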
diff --git a/kernel/sched.c b/kernel/sched.c
index c846e4160..c42402e95 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1157,6 +1157,8 @@ void __init init_idle(void)
sched_data->last_schedule = get_cycles();
}
+extern void init_timervecs (void);
+
void __init sched_init(void)
{
/*
@@ -1171,6 +1173,8 @@ void __init sched_init(void)
for(nr = 0; nr < PIDHASH_SZ; nr++)
pidhash[nr] = NULL;
+ init_timervecs();
+
init_bh(TIMER_BH, timer_bh);
init_bh(TQUEUE_BH, tqueue_bh);
init_bh(IMMEDIATE_BH, immediate_bh);
diff --git a/kernel/signal.c b/kernel/signal.c
index 0958af05c..e37b7f399 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -6,6 +6,7 @@
* 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
*/
+#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/unistd.h>
@@ -387,7 +388,7 @@ printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
sigaddset(&t->signal, sig);
if (!sigismember(&t->blocked, sig)) {
t->sigpending = 1;
-#ifdef __SMP__
+#ifdef CONFIG_SMP
/*
* If the task is running on a different CPU
* force a reschedule on the other CPU - note that
@@ -404,7 +405,7 @@ printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
if (t->has_cpu && t->processor != smp_processor_id())
smp_send_reschedule(t->processor);
spin_unlock(&runqueue_lock);
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
}
out:
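
Note: the signal.c hunks only switch the SMP guards from the old __SMP__ symbol to CONFIG_SMP; the added #include <linux/config.h> is what makes the CONFIG_* macros visible to the preprocessor in this era, along the lines of:

	#include <linux/config.h>	/* defines CONFIG_SMP on SMP builds */

	#ifdef CONFIG_SMP
		/* cross-CPU work, e.g. smp_send_reschedule() */
	#endif
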
diff --git a/kernel/timer.c b/kernel/timer.c
index b28c69123..1f2698dc3 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -83,13 +83,13 @@ unsigned long prof_shift = 0;
#define TVR_MASK (TVR_SIZE - 1)
struct timer_vec {
- int index;
- struct timer_list *vec[TVN_SIZE];
+ int index;
+ struct list_head vec[TVN_SIZE];
};
struct timer_vec_root {
- int index;
- struct timer_list *vec[TVR_SIZE];
+ int index;
+ struct list_head vec[TVR_SIZE];
};
static struct timer_vec tv5 = { 0 };
@@ -104,19 +104,22 @@ static struct timer_vec * const tvecs[] = {
#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
-static unsigned long timer_jiffies = 0;
-
-static inline void insert_timer(struct timer_list *timer, struct timer_list **vec)
+void init_timervecs (void)
{
- struct timer_list *next = *vec;
+ int i;
- timer->next = next;
- if (next)
- next->prev = timer;
- *vec = timer;
- timer->prev = (struct timer_list *)vec;
+ for (i = 0; i < TVN_SIZE; i++) {
+ INIT_LIST_HEAD(tv5.vec + i);
+ INIT_LIST_HEAD(tv4.vec + i);
+ INIT_LIST_HEAD(tv3.vec + i);
+ INIT_LIST_HEAD(tv2.vec + i);
+ }
+ for (i = 0; i < TVR_SIZE; i++)
+ INIT_LIST_HEAD(tv1.vec + i);
}
+static unsigned long timer_jiffies = 0;
+
static inline void internal_add_timer(struct timer_list *timer)
{
/*
@@ -124,7 +127,7 @@ static inline void internal_add_timer(struct timer_list *timer)
*/
unsigned long expires = timer->expires;
unsigned long idx = expires - timer_jiffies;
- struct timer_list ** vec;
+ struct list_head * vec;
if (idx < TVR_SIZE) {
int i = expires & TVR_MASK;
@@ -148,10 +151,13 @@ static inline void internal_add_timer(struct timer_list *timer)
vec = tv5.vec + i;
} else {
/* Can only get here on architectures with 64-bit jiffies */
- timer->next = timer->prev = timer;
+ INIT_LIST_HEAD(&timer->list);
return;
}
- insert_timer(timer, vec);
+ /*
+ * Timers are FIFO!
+ */
+ list_add(&timer->list, vec->prev);
}
spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
@@ -161,7 +167,7 @@ void add_timer(struct timer_list *timer)
unsigned long flags;
spin_lock_irqsave(&timerlist_lock, flags);
- if (timer->prev)
+ if (timer->list.next)
goto bug;
internal_add_timer(timer);
out:
@@ -174,17 +180,12 @@ bug:
goto out;
}
-static inline int detach_timer(struct timer_list *timer)
+static inline int detach_timer (struct timer_list *timer)
{
- struct timer_list *prev = timer->prev;
- if (prev) {
- struct timer_list *next = timer->next;
- prev->next = next;
- if (next)
- next->prev = prev;
- return 1;
- }
- return 0;
+ if (!timer_pending(timer))
+ return 0;
+ list_del(&timer->list);
+ return 1;
}
int mod_timer(struct timer_list *timer, unsigned long expires)
@@ -207,7 +208,7 @@ int del_timer(struct timer_list * timer)
spin_lock_irqsave(&timerlist_lock, flags);
ret = detach_timer(timer);
- timer->next = timer->prev = 0;
+ timer->list.next = timer->list.prev = NULL;
spin_unlock_irqrestore(&timerlist_lock, flags);
return ret;
}
@@ -231,7 +232,7 @@ int del_timer_sync(struct timer_list * timer)
spin_lock_irqsave(&timerlist_lock, flags);
ret += detach_timer(timer);
- timer->next = timer->prev = 0;
+ timer->list.next = timer->list.prev = 0;
running = timer->running;
spin_unlock_irqrestore(&timerlist_lock, flags);
@@ -247,42 +248,58 @@ int del_timer_sync(struct timer_list * timer)
static inline void cascade_timers(struct timer_vec *tv)
{
- /* cascade all the timers from tv up one level */
- struct timer_list *timer;
- timer = tv->vec[tv->index];
- /*
- * We are removing _all_ timers from the list, so we don't have to
- * detach them individually, just clear the list afterwards.
- */
- while (timer) {
- struct timer_list *tmp = timer;
- timer = timer->next;
- internal_add_timer(tmp);
- }
- tv->vec[tv->index] = NULL;
- tv->index = (tv->index + 1) & TVN_MASK;
+ /* cascade all the timers from tv up one level */
+ struct list_head *head, *curr, *next;
+
+ head = tv->vec + tv->index;
+ curr = head->next;
+ /*
+ * We are removing _all_ timers from the list, so we don't have to
+ * detach them individually, just clear the list afterwards.
+ */
+ while (curr != head) {
+ struct timer_list *tmp;
+
+ tmp = list_entry(curr, struct timer_list, list);
+ next = curr->next;
+ list_del(curr); // not needed
+ internal_add_timer(tmp);
+ curr = next;
+ }
+ INIT_LIST_HEAD(head);
+ tv->index = (tv->index + 1) & TVN_MASK;
}
static inline void run_timer_list(void)
{
spin_lock_irq(&timerlist_lock);
while ((long)(jiffies - timer_jiffies) >= 0) {
- struct timer_list *timer;
+ struct list_head *head, *curr;
if (!tv1.index) {
int n = 1;
do {
cascade_timers(tvecs[n]);
} while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
}
- while ((timer = tv1.vec[tv1.index])) {
- void (*fn)(unsigned long) = timer->function;
- unsigned long data = timer->data;
+repeat:
+ head = tv1.vec + tv1.index;
+ curr = head->next;
+ if (curr != head) {
+ struct timer_list *timer;
+ void (*fn)(unsigned long);
+ unsigned long data;
+
+ timer = list_entry(curr, struct timer_list, list);
+ fn = timer->function;
+ data= timer->data;
+
detach_timer(timer);
- timer->next = timer->prev = NULL;
+ timer->list.next = timer->list.prev = NULL;
timer_set_running(timer);
spin_unlock_irq(&timerlist_lock);
fn(data);
spin_lock_irq(&timerlist_lock);
+ goto repeat;
}
++timer_jiffies;
tv1.index = (tv1.index + 1) & TVR_MASK;
@@ -340,7 +357,7 @@ static void second_overflow(void)
/* Bump the maxerror field */
time_maxerror += time_tolerance >> SHIFT_USEC;
if ( time_maxerror > NTP_PHASE_LIMIT ) {
- time_maxerror = NTP_PHASE_LIMIT;
+ time_maxerror = NTP_PHASE_LIMIT;
time_status |= STA_UNSYNC;
}
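
Note: the timer.c rewrite replaces the hand-rolled next/prev pointer chains in each timer-wheel slot with standard struct list_head buckets: init_timervecs() initializes every slot, internal_add_timer() inserts at the tail (list_add() after vec->prev) so timers in a bucket run FIFO, and detach_timer() becomes a timer_pending() check plus list_del(). A self-contained sketch of that list idiom, assuming the usual <linux/list.h> helpers (struct my_timer is illustrative):

	#include <linux/list.h>

	struct my_timer {
		struct list_head list;	/* links the timer into one wheel slot */
		unsigned long expires;
	};

	static LIST_HEAD(slot);		/* one bucket of the timer wheel */

	static void add_tail_fifo(struct my_timer *t)
	{
		/* list_add() inserts after the given node; inserting after
		 * slot.prev (the current tail) keeps the bucket in FIFO
		 * order, which is what internal_add_timer() does above. */
		list_add(&t->list, slot.prev);
	}

	static int detach(struct my_timer *t)
	{
		if (!t->list.next)	/* mirrors timer_pending(): NULL = not queued */
			return 0;
		list_del(&t->list);
		t->list.next = t->list.prev = NULL;
		return 1;
	}
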