author    Ralf Baechle <ralf@linux-mips.org>    2001-01-11 04:02:40 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2001-01-11 04:02:40 +0000
commit    e47f00743fc4776491344f2c618cc8dc2c23bcbc (patch)
tree      13e03a113a82a184c51c19c209867cfd3a59b3b9 /kernel
parent    b2ad5f821b1381492d792ca10b1eb7a107b48f14 (diff)
Merge with Linux 2.4.0.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c    | 18
-rw-r--r--  kernel/fork.c    | 38
-rw-r--r--  kernel/ksyms.c   |  4
-rw-r--r--  kernel/sched.c   | 14
-rw-r--r--  kernel/signal.c  | 20
5 files changed, 51 insertions, 43 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 50a9f51de..cd642927b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -382,6 +382,7 @@ static void exit_notify(void)
*/
write_lock_irq(&tasklist_lock);
+ current->state = TASK_ZOMBIE;
do_notify_parent(current, current->exit_signal);
while (current->p_cptr != NULL) {
p = current->p_cptr;
@@ -415,9 +416,6 @@ static void exit_notify(void)
}
}
write_unlock_irq(&tasklist_lock);
-
- if (current->leader)
- disassociate_ctty(1);
}
NORET_TYPE void do_exit(long code)
@@ -437,20 +435,26 @@ fake_volatile:
#ifdef CONFIG_BSD_PROCESS_ACCT
acct_process(code);
#endif
+ __exit_mm(tsk);
+
lock_kernel();
sem_exit();
- __exit_mm(tsk);
__exit_files(tsk);
__exit_fs(tsk);
exit_sighand(tsk);
exit_thread();
- tsk->state = TASK_ZOMBIE;
- tsk->exit_code = code;
- exit_notify();
+
+ if (current->leader)
+ disassociate_ctty(1);
+
put_exec_domain(tsk->exec_domain);
if (tsk->binfmt && tsk->binfmt->module)
__MOD_DEC_USE_COUNT(tsk->binfmt->module);
+
+ tsk->exit_code = code;
+ exit_notify();
schedule();
+ BUG();
/*
* In order to get rid of the "volatile function does return" message
* I did this little loop that confuses gcc to think do_exit really
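[Note] The exit.c hunks above reorder do_exit(): __exit_mm() now runs before the big-kernel-lock section, disassociate_ctty() moves out of exit_notify() into do_exit(), the TASK_ZOMBIE marking moves into exit_notify() under tasklist_lock just before the parent is notified, and a BUG() after schedule() traps a return that should never happen. Purely as an illustration of the no-return idiom the truncated comment is talking about (not part of the patch; all names below are made up), a C11 program would express the same intent with a noreturn annotation plus a hard trap:

#include <stdio.h>
#include <stdlib.h>
#include <stdnoreturn.h>

/* Illustrative only: my_exit() stands in for do_exit(), exit() for the
 * schedule() call that never returns for a zombie, and abort() for BUG(). */
static noreturn void my_exit(int code)
{
    printf("exiting with code %d\n", code);
    exit(code);
    abort();   /* never reached; traps hard if the no-return call did return */
}

int main(void)
{
    my_exit(3);
    /* unreachable: the compiler knows my_exit() cannot return */
}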
diff --git a/kernel/fork.c b/kernel/fork.c
index f5d3a83df..99c1f2317 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -192,6 +192,8 @@ fail_nomem:
return retval;
}
+spinlock_t mmlist_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
+
#define allocate_mm() (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
@@ -242,7 +244,9 @@ inline void __mmdrop(struct mm_struct *mm)
*/
void mmput(struct mm_struct *mm)
{
- if (atomic_dec_and_test(&mm->mm_users)) {
+ if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
+ list_del(&mm->mmlist);
+ spin_unlock(&mmlist_lock);
exit_mmap(mm);
mmdrop(mm);
}
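[Note] The mmput() hunk above is the "take the lock only on the final put" pattern: atomic_dec_and_lock() decrements mm_users and returns with mmlist_lock held only when the count reaches zero, so ordinary puts stay lock-free while the last user can safely list_del() the mm from the new global mmlist. A minimal userspace sketch of that pattern using C11 atomics and a pthread mutex follows; every name in it is illustrative, not kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative analogue of atomic_dec_and_lock(): decrement *count and return
 * true with *lock held only if the count dropped to zero. */
static bool dec_and_lock(atomic_int *count, pthread_mutex_t *lock)
{
    int old = atomic_load(count);
    while (old > 1)                          /* fast path: not the last user, no lock */
        if (atomic_compare_exchange_weak(count, &old, old - 1))
            return false;

    pthread_mutex_lock(lock);                /* slow path: might be the last user */
    if (atomic_fetch_sub(count, 1) == 1)
        return true;                         /* caller must unlock after cleanup */
    pthread_mutex_unlock(lock);              /* raced with a new reference; back off */
    return false;
}

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;  /* stands in for mmlist_lock */

struct obj { atomic_int users; };            /* stands in for struct mm_struct */

static void put_obj(struct obj *o)
{
    if (dec_and_lock(&o->users, &list_lock)) {
        /* last reference: this is where list_del() would unlink it */
        pthread_mutex_unlock(&list_lock);
        free(o);                             /* analogue of exit_mmap() + mmdrop() */
    }
}

int main(void)
{
    struct obj *o = malloc(sizeof *o);
    atomic_init(&o->users, 2);
    put_obj(o);                              /* 2 -> 1: lock never taken */
    put_obj(o);                              /* 1 -> 0: cleans up under the lock */
    puts("done");
    return 0;
}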
@@ -272,9 +276,9 @@ void mm_release(void)
}
}
-static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
- struct mm_struct * mm;
+ struct mm_struct * mm, *oldmm;
int retval;
tsk->min_flt = tsk->maj_flt = 0;
@@ -289,12 +293,13 @@ static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
*
* We need to steal a active VM for that..
*/
- mm = current->mm;
- if (!mm)
+ oldmm = current->mm;
+ if (!oldmm)
return 0;
if (clone_flags & CLONE_VM) {
- atomic_inc(&mm->mm_users);
+ atomic_inc(&oldmm->mm_users);
+ mm = oldmm;
goto good_mm;
}
@@ -304,16 +309,25 @@ static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
goto fail_nomem;
/* Copy the current MM stuff.. */
- memcpy(mm, current->mm, sizeof(*mm));
+ memcpy(mm, oldmm, sizeof(*mm));
if (!mm_init(mm))
goto fail_nomem;
- tsk->mm = mm;
- tsk->active_mm = mm;
-
- down(&current->mm->mmap_sem);
+ down(&oldmm->mmap_sem);
retval = dup_mmap(mm);
- up(&current->mm->mmap_sem);
+ up(&oldmm->mmap_sem);
+
+ /*
+ * Add it to the mmlist after the parent.
+ *
+ * Doing it this way means that we can order
+ * the list, and fork() won't mess up the
+ * ordering significantly.
+ */
+ spin_lock(&mmlist_lock);
+ list_add(&mm->mmlist, &oldmm->mmlist);
+ spin_unlock(&mmlist_lock);
+
if (retval)
goto free_pt;
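[Note] In copy_mm(), the parent's mm is now held in a separate oldmm pointer, dup_mmap() takes the parent's mmap_sem through oldmm instead of re-reading current->mm, and the new comment explains the list_add() placement: inserting the child's mm directly after the parent's entry keeps children grouped behind their parent, so fork() only perturbs the mmlist ordering locally. A tiny userspace analogue of that insert-after-parent behaviour (struct and function names invented for the example):

#include <stdio.h>

struct node {
    struct node *next, *prev;
    const char *name;
};

/* insert 'new' right after 'head', as list_add(&mm->mmlist, &oldmm->mmlist) does */
static void list_add_after(struct node *new, struct node *head)
{
    new->next = head->next;
    new->prev = head;
    head->next->prev = new;
    head->next = new;
}

int main(void)
{
    struct node list   = { &list, &list, "list-head" };   /* circular list head */
    struct node parent = { .name = "parent-mm" };
    struct node child1 = { .name = "child1-mm" };
    struct node child2 = { .name = "child2-mm" };

    list_add_after(&parent, &list);     /* parent joins the global list          */
    list_add_after(&child1, &parent);   /* first fork: child lands after parent  */
    list_add_after(&child2, &parent);   /* second fork: also right after parent  */

    for (struct node *p = list.next; p != &list; p = p->next)
        printf("%s\n", p->name);        /* parent-mm, child2-mm, child1-mm */
    return 0;
}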
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 8d74cdb91..8afe07cca 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -430,10 +430,8 @@ EXPORT_SYMBOL(xtime);
EXPORT_SYMBOL(do_gettimeofday);
EXPORT_SYMBOL(do_settimeofday);
-#ifdef CONFIG_X86
+#if !defined(__ia64__)
EXPORT_SYMBOL(loops_per_jiffy);
-#elif !defined(__ia64__)
-EXPORT_SYMBOL(loops_per_sec);
#endif
EXPORT_SYMBOL(kstat);
diff --git a/kernel/sched.c b/kernel/sched.c
index 1299c8365..bc2dcfa70 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1151,17 +1151,13 @@ static void show_task(struct task_struct * p)
else
printk("\n");
+#ifdef CONFIG_X86
+/* This is very useful, but only works on x86 right now */
{
- struct sigqueue *q;
- char s[sizeof(sigset_t)*2+1], b[sizeof(sigset_t)*2+1];
-
- render_sigset_t(&p->pending.signal, s);
- render_sigset_t(&p->blocked, b);
- printk(" sig: %d %s %s :", signal_pending(p), s, b);
- for (q = p->pending.head; q ; q = q->next)
- printk(" %d", q->info.si_signo);
- printk(" X\n");
+ extern void show_trace(unsigned long);
+ show_trace(p->thread.esp);
}
+#endif
}
char * render_sigset_t(sigset_t *set, char *buffer)
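[Note] The sched.c hunk replaces show_task()'s dump of the pending and blocked signal sets (built with render_sigset_t()) with a stack trace via show_trace(p->thread.esp), guarded by CONFIG_X86 since only x86 provides that unwinder here. For reference, the rendering idea being dropped packs four signals into each output character; a rough userspace sketch of it follows (digit ordering simplified, and this is not the kernel helper itself):

#include <signal.h>
#include <stdio.h>

#define MAX_SIG 64   /* assumption: 64 signals, as on Linux */

/* Loose sketch of render_sigset_t(): one hex digit per group of four signals. */
static void render_sigset(const sigset_t *set, char *buf)
{
    for (int i = 0; i < MAX_SIG / 4; i++) {
        int nibble = 0;
        for (int bit = 0; bit < 4; bit++) {
            int sig = i * 4 + bit + 1;        /* signal numbers start at 1 */
            if (sigismember(set, sig) == 1)
                nibble |= 1 << bit;
        }
        buf[i] = "0123456789abcdef"[nibble];
    }
    buf[MAX_SIG / 4] = '\0';
}

int main(void)
{
    sigset_t set;
    char buf[MAX_SIG / 4 + 1];

    sigemptyset(&set);
    sigaddset(&set, SIGINT);
    sigaddset(&set, SIGTERM);
    render_sigset(&set, buf);
    printf("set: %s\n", buf);    /* SIGINT and SIGTERM show up as set bits */
    return 0;
}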
diff --git a/kernel/signal.c b/kernel/signal.c
index c6ec0cb7b..f93ee1d71 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -751,16 +751,6 @@ void do_notify_parent(struct task_struct *tsk, int sig)
status = tsk->exit_code & 0x7f;
why = SI_KERNEL; /* shouldn't happen */
switch (tsk->state) {
- case TASK_ZOMBIE:
- if (tsk->exit_code & 0x80)
- why = CLD_DUMPED;
- else if (tsk->exit_code & 0x7f)
- why = CLD_KILLED;
- else {
- why = CLD_EXITED;
- status = tsk->exit_code >> 8;
- }
- break;
case TASK_STOPPED:
/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
if (tsk->ptrace & PT_PTRACED)
@@ -770,8 +760,14 @@ void do_notify_parent(struct task_struct *tsk, int sig)
break;
default:
- printk(KERN_DEBUG "eh? notify_parent with state %ld?\n",
- tsk->state);
+ if (tsk->exit_code & 0x80)
+ why = CLD_DUMPED;
+ else if (tsk->exit_code & 0x7f)
+ why = CLD_KILLED;
+ else {
+ why = CLD_EXITED;
+ status = tsk->exit_code >> 8;
+ }
break;
}
info.si_code = why;
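[Note] The signal.c hunks remove the explicit TASK_ZOMBIE case from do_notify_parent() and fold the exit-code decoding into the default branch, replacing the old debug printk, so the decoding now applies to any state other than TASK_STOPPED. The decoding itself is the familiar wait-status layout: bit 7 flags a core dump, the low 7 bits carry the terminating signal, and bits 8-15 carry a normal exit status. A small userspace program exercising the same layout through waitpid():

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Decode a wait-status word the same way do_notify_parent() decodes exit_code. */
static void classify(int status)
{
    if (status & 0x80)
        printf("CLD_DUMPED: signal %d, core dumped\n", status & 0x7f);
    else if (status & 0x7f)
        printf("CLD_KILLED: signal %d\n", status & 0x7f);
    else
        printf("CLD_EXITED: exit status %d\n", (status >> 8) & 0xff);
}

int main(void)
{
    pid_t pid = fork();
    if (pid == 0)
        _exit(42);               /* child terminates normally */

    int status;
    waitpid(pid, &status, 0);    /* raw status word uses the same bit layout */
    classify(status);            /* prints "CLD_EXITED: exit status 42" */
    return 0;
}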