author    Ralf Baechle <ralf@linux-mips.org>    2000-08-29 05:27:07 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2000-08-29 05:27:07 +0000
commit    a60c6812feb6ba35b5b8a9ee8a5ca3d01d1fcd5f (patch)
tree      2290ff15f280314a063f3dfc523742c8934c4259 /kernel
parent    1a1d77dd589de5a567fa95e36aa6999c704ceca4 (diff)
Merge with Linux 2.4.0-test8-pre1.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c     75
-rw-r--r--  kernel/fork.c      6
-rw-r--r--  kernel/sched.c    40
-rw-r--r--  kernel/signal.c  266
-rw-r--r--  kernel/timer.c     2
5 files changed, 281 insertions(+), 108 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 74d2cf5f1..6eb4d568a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -144,17 +144,29 @@ static inline int has_stopped_jobs(int pgrp)
return retval;
}
+/*
+ * When we die, we re-parent all our children.
+ * Try to give them to another thread in our process
+ * group, and if no such member exists, give it to
+ * the global child reaper process (ie "init")
+ */
static inline void forget_original_parent(struct task_struct * father)
{
- struct task_struct * p;
+ struct task_struct * p, *reaper;
read_lock(&tasklist_lock);
+
+ /* Next in our thread group */
+ reaper = next_thread(father);
+ if (reaper == father)
+ reaper = child_reaper;
+
for_each_task(p) {
if (p->p_opptr == father) {
/* We dont want people slaying init */
p->exit_signal = SIGCHLD;
p->self_exec_id++;
- p->p_opptr = child_reaper; /* init */
+ p->p_opptr = reaper;
if (p->pdeath_signal) send_sig(p->pdeath_signal, p, 0);
}
}
@@ -378,7 +390,6 @@ static void exit_notify(void)
&& !capable(CAP_KILL))
current->exit_signal = SIGCHLD;
- notify_parent(current, current->exit_signal);
/*
* This loop does two things:
@@ -390,6 +401,7 @@ static void exit_notify(void)
*/
write_lock_irq(&tasklist_lock);
+ do_notify_parent(current, current->exit_signal);
while (current->p_cptr != NULL) {
p = current->p_cptr;
current->p_cptr = p->p_osptr;
@@ -402,7 +414,7 @@ static void exit_notify(void)
p->p_osptr->p_ysptr = p;
p->p_pptr->p_cptr = p;
if (p->state == TASK_ZOMBIE)
- notify_parent(p, p->exit_signal);
+ do_notify_parent(p, p->exit_signal);
/*
* process group orphan check
* Case ii: Our child is in a different pgrp
@@ -483,9 +495,9 @@ asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struc
{
int flag, retval;
DECLARE_WAITQUEUE(wait, current);
- struct task_struct *p;
+ struct task_struct *tsk;
- if (options & ~(WNOHANG|WUNTRACED|__WCLONE|__WALL))
+ if (options & ~(WNOHANG|WUNTRACED|__WNOTHREAD|__WCLONE|__WALL))
return -EINVAL;
add_wait_queue(&current->wait_chldexit,&wait);
@@ -493,27 +505,30 @@ repeat:
flag = 0;
current->state = TASK_INTERRUPTIBLE;
read_lock(&tasklist_lock);
- for (p = current->p_cptr ; p ; p = p->p_osptr) {
- if (pid>0) {
- if (p->pid != pid)
- continue;
- } else if (!pid) {
- if (p->pgrp != current->pgrp)
- continue;
- } else if (pid != -1) {
- if (p->pgrp != -pid)
+ tsk = current;
+ do {
+ struct task_struct *p;
+ for (p = tsk->p_cptr ; p ; p = p->p_osptr) {
+ if (pid>0) {
+ if (p->pid != pid)
+ continue;
+ } else if (!pid) {
+ if (p->pgrp != current->pgrp)
+ continue;
+ } else if (pid != -1) {
+ if (p->pgrp != -pid)
+ continue;
+ }
+ /* Wait for all children (clone and not) if __WALL is set;
+ * otherwise, wait for clone children *only* if __WCLONE is
+ * set; otherwise, wait for non-clone children *only*. (Note:
+ * A "clone" child here is one that reports to its parent
+ * using a signal other than SIGCHLD.) */
+ if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
+ && !(options & __WALL))
continue;
- }
- /* Wait for all children (clone and not) if __WALL is set;
- * otherwise, wait for clone children *only* if __WCLONE is
- * set; otherwise, wait for non-clone children *only*. (Note:
- * A "clone" child here is one that reports to its parent
- * using a signal other than SIGCHLD.) */
- if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
- && !(options & __WALL))
- continue;
- flag = 1;
- switch (p->state) {
+ flag = 1;
+ switch (p->state) {
case TASK_STOPPED:
if (!p->exit_code)
continue;
@@ -543,15 +558,19 @@ repeat:
REMOVE_LINKS(p);
p->p_pptr = p->p_opptr;
SET_LINKS(p);
+ do_notify_parent(p, SIGCHLD);
write_unlock_irq(&tasklist_lock);
- notify_parent(p, SIGCHLD);
} else
release(p);
goto end_wait4;
default:
continue;
+ }
}
- }
+ if (options & __WNOTHREAD)
+ break;
+ tsk = next_thread(tsk);
+ } while (tsk != current);
read_unlock(&tasklist_lock);
if (flag) {
retval = 0;
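
The sys_wait4() change above is the user-visible half of the new thread-group machinery: unless __WNOTHREAD is passed, the caller no longer scans only its own children but those of every thread in its group, walking the circular list via next_thread() until it comes back to where it started. As an illustration only, here is a minimal, self-contained userspace sketch of that do/while traversal pattern; the struct and next_thread() below are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Simplified stand-in for the kernel's circular thread_group list. */
struct task {
    int pid;
    struct task *thread_group_next;   /* last entry points back to the first */
};

static struct task *next_thread(struct task *t)
{
    return t->thread_group_next;
}

int main(void)
{
    struct task a = { 100, 0 }, b = { 101, 0 }, c = { 102, 0 };
    a.thread_group_next = &b;
    b.thread_group_next = &c;
    c.thread_group_next = &a;              /* close the ring */

    struct task *current = &a, *tsk = current;
    do {
        /* sys_wait4() scans tsk->p_cptr's children at this point;
         * with __WNOTHREAD set it would break after the first pass */
        printf("scanning children of thread %d\n", tsk->pid);
        tsk = next_thread(tsk);
    } while (tsk != current);              /* stop after one full lap */
    return 0;
}
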
diff --git a/kernel/fork.c b/kernel/fork.c
index 4ab0976b1..64dd0f995 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -661,7 +661,13 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
* Let it rip!
*/
retval = p->pid;
+ p->tgid = retval;
+ INIT_LIST_HEAD(&p->thread_group);
write_lock_irq(&tasklist_lock);
+ if (clone_flags & CLONE_THREAD) {
+ p->tgid = current->tgid;
+ list_add(&p->thread_group, &current->thread_group);
+ }
SET_LINKS(p);
hash_pid(p);
nr_threads++;
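
With CLONE_THREAD, do_fork() now stamps the child with the caller's tgid and links it into the caller's thread_group list; without the flag the child starts a fresh single-member group whose tgid is its own pid. A hedged userspace sketch of observing this through clone(2) follows; the CLONE_SIGHAND|CLONE_VM companion flags and the sleep() synchronisation are assumptions of the demo (later kernels require those flags alongside CLONE_THREAD), not something this patch mandates.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int child(void *arg)
{
    (void)arg;
    /* Sharing the caller's tgid means sys_getpid(), which now
     * returns current->tgid, reports the same value here. */
    printf("child:  getpid() = %d\n", (int)getpid());
    return 0;
}

int main(void)
{
    const size_t stacksz = 64 * 1024;
    char *stack = malloc(stacksz);
    if (!stack)
        return 1;

    printf("parent: getpid() = %d\n", (int)getpid());
    /* CLONE_THREAD puts the child in the caller's thread group;
     * the stack grows down, so pass its top to clone(). */
    if (clone(child, stack + stacksz,
              CLONE_THREAD | CLONE_SIGHAND | CLONE_VM, NULL) == -1)
        return 1;
    sleep(1);   /* crude: a CLONE_THREAD child cannot be wait()ed for */
    return 0;
}
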
diff --git a/kernel/sched.c b/kernel/sched.c
index fda8b5eea..8b47b558a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -455,17 +455,35 @@ signed long schedule_timeout(signed long timeout)
static inline void __schedule_tail(struct task_struct *prev)
{
#ifdef CONFIG_SMP
- if ((prev->state == TASK_RUNNING) &&
- (prev != idle_task(smp_processor_id()))) {
- unsigned long flags;
-
- spin_lock_irqsave(&runqueue_lock, flags);
- prev->has_cpu = 0;
- reschedule_idle(prev, flags); // spin_unlocks runqueue
- } else {
- wmb();
- prev->has_cpu = 0;
- }
+ unsigned long flags;
+
+ /*
+ * fast path falls through. We have to take the runqueue lock
+ * unconditionally to make sure that the test of prev->state
+ * and setting has_cpu is atomic wrt. interrupts. It's not
+ * a big problem in the common case because we recently took
+ * the runqueue lock so it's likely to be in this processor's
+ * cache.
+ */
+ spin_lock_irqsave(&runqueue_lock, flags);
+ prev->has_cpu = 0;
+ if (prev->state == TASK_RUNNING)
+ goto running_again;
+out_unlock:
+ spin_unlock_irqrestore(&runqueue_lock, flags);
+ return;
+
+ /*
+ * Slow path - we 'push' the previous process and
+ * reschedule_idle() will attempt to find a new
+ * processor for it. (but it might preempt the
+ * current process as well.)
+ */
+running_again:
+ if (prev == idle_task(smp_processor_id()))
+ goto out_unlock;
+ reschedule_idle(prev, flags); // spin_unlocks runqueue
+ return;
#endif /* CONFIG_SMP */
}
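
The rewrite above trades the old conditional locking for one unconditional spin_lock_irqsave(), so that testing prev->state and clearing prev->has_cpu happen atomically with respect to interrupts and to wakeups on other CPUs. A rough userspace analogue of that pattern, with a pthread mutex standing in for the runqueue spinlock (every name below is an illustrative stand-in, not a kernel definition):

#include <pthread.h>
#include <stdbool.h>

enum { TASK_RUNNING = 0 };

struct prev_task {
    int  state;
    bool has_cpu;
};

static pthread_mutex_t runqueue_lock = PTHREAD_MUTEX_INITIALIZER;

static void schedule_tail_sketch(struct prev_task *prev, bool prev_is_idle)
{
    /* Taken unconditionally, fast path included: nobody else holding
     * the lock can see has_cpu cleared while the state test is pending. */
    pthread_mutex_lock(&runqueue_lock);
    prev->has_cpu = false;
    if (prev->state == TASK_RUNNING && !prev_is_idle) {
        /* slow path: reschedule_idle() would hunt for another
         * CPU for prev here, still under the lock */
    }
    pthread_mutex_unlock(&runqueue_lock);
}

int main(void)
{
    struct prev_task prev = { TASK_RUNNING, true };
    schedule_tail_sketch(&prev, false);
    return prev.has_cpu ? 1 : 0;
}
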
diff --git a/kernel/signal.c b/kernel/signal.c
index b64225778..7b256a954 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -180,7 +180,7 @@ printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
if (!(current->notifier)(current->notifier_data)) {
current->sigpending = 0;
return 0;
- }
+ }
}
}
@@ -288,6 +288,18 @@ static int rm_sig_from_queue(int sig, struct task_struct *t)
}
/*
+ * Bad permissions for sending the signal
+ */
+int bad_signal(int sig, struct siginfo *info, struct task_struct *t)
+{
+ return (!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
+ && ((sig != SIGCONT) || (current->session != t->session))
+ && (current->euid ^ t->suid) && (current->euid ^ t->uid)
+ && (current->uid ^ t->suid) && (current->uid ^ t->uid)
+ && !capable(CAP_KILL);
+}
+
+/*
* Determine whether a signal should be posted or not.
*
* Signals with SIG_IGN can be ignored, except for the
@@ -328,37 +340,13 @@ static int ignored_signal(int sig, struct task_struct *t)
return 1;
}
-int
-send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+/*
+ * Handle TASK_STOPPED.
+ * Also, return true for the unblockable signals that we
+ * should deliver to all threads..
+ */
+static void handle_stop_signal(int sig, struct task_struct *t)
{
- unsigned long flags;
- int ret;
- struct signal_queue *q = 0;
-
-
-#if DEBUG_SIG
-printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
-#endif
-
- ret = -EINVAL;
- if (sig < 0 || sig > _NSIG)
- goto out_nolock;
- /* The somewhat baroque permissions check... */
- ret = -EPERM;
- if ((!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
- && ((sig != SIGCONT) || (current->session != t->session))
- && (current->euid ^ t->suid) && (current->euid ^ t->uid)
- && (current->uid ^ t->suid) && (current->uid ^ t->uid)
- && !capable(CAP_KILL))
- goto out_nolock;
-
- /* The null signal is a permissions and process existance probe.
- No signal is actually delivered. Same goes for zombies. */
- ret = 0;
- if (!sig || !t->sig)
- goto out_nolock;
-
- spin_lock_irqsave(&t->sigmask_lock, flags);
switch (sig) {
case SIGKILL: case SIGCONT:
/* Wake up the process if stopped. */
@@ -377,19 +365,12 @@ printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
recalc_sigpending(t);
break;
}
+ return 0;
+}
- /* Optimize away the signal, if it's a signal that can be
- handled immediately (ie non-blocked and untraced) and
- that is ignored (either explicitly or by default). */
-
- if (ignored_signal(sig, t))
- goto out;
-
- /* Support queueing exactly one non-rt signal, so that we
- can get more detailed information about the cause of
- the signal. */
- if (sig < SIGRTMIN && sigismember(&t->signal, sig))
- goto out;
+static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
+{
+ struct signal_queue * q = NULL;
/* Real-time signals must be queued if sent by sigqueue, or
some other real-time mechanism. It is implementation
@@ -400,8 +381,7 @@ printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
pass on the info struct. */
if (atomic_read(&nr_queued_signals) < max_queued_signals) {
- q = (struct signal_queue *)
- kmem_cache_alloc(signal_queue_cachep, GFP_ATOMIC);
+ q = kmem_cache_alloc(signal_queue_cachep, GFP_ATOMIC);
}
if (q) {
@@ -434,8 +414,7 @@ printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
* Queue overflow, abort. We may abort if the signal was rt
* and sent by user using something other than kill().
*/
- ret = -EAGAIN;
- goto out;
+ return -EAGAIN;
}
sigaddset(&t->signal, sig);
@@ -460,12 +439,118 @@ printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
spin_unlock(&runqueue_lock);
#endif /* CONFIG_SMP */
}
+ return 0;
+}
+
+
+/*
+ * Send a thread-group-wide signal.
+ *
+ * Rule: SIGSTOP and SIGKILL get delivered to _everybody_.
+ *
+ * Others get delivered to the thread that doesn't have them
+ * blocked (just one such thread).
+ *
+ * If all threads have it blocked, it gets delievered to the
+ * thread group leader.
+ */
+static int send_tg_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+{
+ int retval = 0;
+ struct task_struct *tsk;
+
+ if (sig < 0 || sig > _NSIG)
+ return -EINVAL;
+
+ if (bad_signal(sig, info, p))
+ return -EPERM;
+
+ if (!sig)
+ return 0;
+
+ tsk = p;
+ do {
+ unsigned long flags;
+ tsk = next_thread(tsk);
+
+ /* Zombie? Ignore */
+ if (!tsk->sig)
+ continue;
+
+ spin_lock_irqsave(&tsk->sigmask_lock, flags);
+ handle_stop_signal(sig, tsk);
+
+ /* Is the signal ignored by this thread? */
+ if (ignored_signal(sig, tsk))
+ goto next;
+
+ /* Have we already delivered this non-queued signal? */
+ if (sig < SIGRTMIN && sigismember(&tsk->signal, sig))
+ goto next;
+
+ /* Not blocked? Go, girl, go! */
+ if (tsk == p || !sigismember(&tsk->blocked, sig)) {
+ retval = deliver_signal(sig, info, tsk);
+
+ /* Signals other than SIGKILL and SIGSTOP have "once" semantics */
+ if (sig != SIGKILL && sig != SIGSTOP)
+ tsk = p;
+ }
+next:
+ spin_unlock_irqrestore(&tsk->sigmask_lock, flags);
+ if ((tsk->state & TASK_INTERRUPTIBLE) && signal_pending(tsk))
+ wake_up_process(tsk);
+ } while (tsk != p);
+ return retval;
+}
+
+
+int
+send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+{
+ unsigned long flags;
+ int ret;
+
+
+#if DEBUG_SIG
+printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
+#endif
+
+ ret = -EINVAL;
+ if (sig < 0 || sig > _NSIG)
+ goto out_nolock;
+ /* The somewhat baroque permissions check... */
+ ret = -EPERM;
+ if (bad_signal(sig, info, t))
+ goto out_nolock;
+
+ /* The null signal is a permissions and process existance probe.
+ No signal is actually delivered. Same goes for zombies. */
+ ret = 0;
+ if (!sig || !t->sig)
+ goto out_nolock;
+
+ spin_lock_irqsave(&t->sigmask_lock, flags);
+ handle_stop_signal(sig, t);
+
+ /* Optimize away the signal, if it's a signal that can be
+ handled immediately (ie non-blocked and untraced) and
+ that is ignored (either explicitly or by default). */
+
+ if (ignored_signal(sig, t))
+ goto out;
+
+ /* Support queueing exactly one non-rt signal, so that we
+ can get more detailed information about the cause of
+ the signal. */
+ if (sig < SIGRTMIN && sigismember(&t->signal, sig))
+ goto out;
+ ret = deliver_signal(sig, info, t);
out:
spin_unlock_irqrestore(&t->sigmask_lock, flags);
- if ((t->state & TASK_INTERRUPTIBLE) && signal_pending(t))
- wake_up_process(t);
-
+ if ((t->state & TASK_INTERRUPTIBLE) && signal_pending(t))
+ wake_up_process(t);
out_nolock:
#if DEBUG_SIG
printk(" %d -> %d\n", signal_pending(t), ret);
@@ -510,22 +595,17 @@ kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
int retval = -EINVAL;
if (pgrp > 0) {
struct task_struct *p;
- int found = 0;
retval = -ESRCH;
read_lock(&tasklist_lock);
for_each_task(p) {
if (p->pgrp == pgrp) {
int err = send_sig_info(sig, info, p);
- if (err != 0)
+ if (retval)
retval = err;
- else
- found++;
}
}
read_unlock(&tasklist_lock);
- if (found)
- retval = 0;
}
return retval;
}
@@ -542,22 +622,17 @@ kill_sl_info(int sig, struct siginfo *info, pid_t sess)
int retval = -EINVAL;
if (sess > 0) {
struct task_struct *p;
- int found = 0;
retval = -ESRCH;
read_lock(&tasklist_lock);
for_each_task(p) {
if (p->leader && p->session == sess) {
int err = send_sig_info(sig, info, p);
- if (err)
+ if (retval)
retval = err;
- else
- found++;
}
}
read_unlock(&tasklist_lock);
- if (found)
- retval = 0;
}
return retval;
}
@@ -577,6 +652,33 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
return error;
}
+
+/*
+ * Send a signal to a thread group..
+ *
+ * If the pid is the thread group ID, we consider this
+ * a "thread group" signal. Otherwise it degenerates into
+ * a thread-specific signal.
+ */
+static int kill_tg_info(int sig, struct siginfo *info, pid_t pid)
+{
+ int error;
+ struct task_struct *p;
+
+ read_lock(&tasklist_lock);
+ p = find_task_by_pid(pid);
+ error = -ESRCH;
+ if (p) {
+ /* Is it the leader? Otherwise it degenerates into a per-thread thing */
+ if (p->tgid == pid)
+ error = send_tg_sig_info(sig, info, p);
+ else
+ error = send_sig_info(sig, info, p);
+ }
+ read_unlock(&tasklist_lock);
+ return error;
+}
+
/*
* kill_something_info() interprets pid in interesting ways just like kill(2).
*
@@ -584,8 +686,7 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
* is probably wrong. Should make it like BSD or SYSV.
*/
-int
-kill_something_info(int sig, struct siginfo *info, int pid)
+static int kill_something_info(int sig, struct siginfo *info, int pid)
{
if (!pid) {
return kill_pg_info(sig, info, current->pgrp);
@@ -607,7 +708,7 @@ kill_something_info(int sig, struct siginfo *info, int pid)
} else if (pid < 0) {
return kill_pg_info(sig, info, -pid);
} else {
- return kill_proc_info(sig, info, pid);
+ return kill_tg_info(sig, info, pid);
}
}
@@ -646,11 +747,24 @@ kill_proc(pid_t pid, int sig, int priv)
}
/*
+ * Joy. Or not. Pthread wants us to wake up every thread
+ * in our parent group.
+ */
+static void wake_up_parent(struct task_struct *parent)
+{
+ struct task_struct *tsk = parent;
+
+ do {
+ wake_up_interruptible(&tsk->wait_chldexit);
+ tsk = next_thread(tsk);
+ } while (tsk != parent);
+}
+
+/*
* Let a parent know about a status change of a child.
*/
-void
-notify_parent(struct task_struct *tsk, int sig)
+void do_notify_parent(struct task_struct *tsk, int sig)
{
struct siginfo info;
int why, status;
@@ -694,7 +808,23 @@ notify_parent(struct task_struct *tsk, int sig)
info.si_status = status;
send_sig_info(sig, &info, tsk->p_pptr);
- wake_up_interruptible(&tsk->p_pptr->wait_chldexit);
+ wake_up_parent(tsk->p_pptr);
+}
+
+
+/*
+ * We need the tasklist lock because it's the only
+ * thing that protects out "parent" pointer.
+ *
+ * exit.c calls "do_notify_parent()" directly, because
+ * it already has the tasklist lock.
+ */
+void
+notify_parent(struct task_struct *tsk, int sig)
+{
+ read_lock(&tasklist_lock);
+ do_notify_parent(tsk, sig);
+ read_unlock(&tasklist_lock);
}
EXPORT_SYMBOL(dequeue_signal);
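
send_tg_sig_info() above encodes the pthread delivery rule spelled out in its comment: SIGKILL and SIGSTOP reach every thread, while any other group-wide signal lands on one thread that has it unblocked. A hypothetical userspace check of that behaviour follows; the handler, the sleeps, and the thread roles are demo assumptions, not part of the patch.

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig)
{
    (void)sig;
    static const char msg[] = "handled in a thread with SIGUSR1 unblocked\n";
    write(1, msg, sizeof msg - 1);
}

static void *blocker(void *arg)
{
    (void)arg;
    sigset_t set;
    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    pthread_sigmask(SIG_BLOCK, &set, NULL);   /* this thread opts out */
    sleep(2);
    return NULL;
}

static void *open_thread(void *arg)
{
    (void)arg;
    sleep(2);                                 /* SIGUSR1 stays unblocked here */
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;
    signal(SIGUSR1, handler);
    pthread_create(&t1, NULL, blocker, NULL);
    pthread_create(&t2, NULL, open_thread, NULL);
    sleep(1);                  /* let the blocker set its mask first */
    kill(getpid(), SIGUSR1);   /* group-wide: never delivered to the blocker */
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}
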
diff --git a/kernel/timer.c b/kernel/timer.c
index 00ab398b4..044ba492e 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -724,7 +724,7 @@ asmlinkage unsigned long sys_alarm(unsigned int seconds)
asmlinkage long sys_getpid(void)
{
/* This is SMP safe - current->pid doesn't change */
- return current->pid;
+ return current->tgid;
}
/*
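
The timer.c one-liner is what makes a thread group look like a single process to userland: getpid() now reports the thread group ID, so every CLONE_THREAD sibling sees the same value while distinct processes keep distinct ones. A quick pthread check, assuming a threading library that actually creates threads with CLONE_THREAD (the LinuxThreads of this era did not; NPTL later did):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg)
{
    (void)arg;
    /* Same tgid as the main thread, hence the same getpid() value. */
    printf("thread: getpid() = %d\n", (int)getpid());
    return NULL;
}

int main(void)
{
    pthread_t t;
    printf("main:   getpid() = %d\n", (int)getpid());
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    return 0;
}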