summaryrefslogtreecommitdiffstats
path: root/kernel/signal.c
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-02-16 01:07:24 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-02-16 01:07:24 +0000
commit95db6b748fc86297827fbd9c9ef174d491c9ad89 (patch)
tree27a92a942821cde1edda9a1b088718d436b3efe4 /kernel/signal.c
parent45b27b0a0652331d104c953a5b192d843fff88f8 (diff)
Merge with Linux 2.3.40.
Diffstat (limited to 'kernel/signal.c')
-rw-r--r--kernel/signal.c172
1 file changed, 85 insertions, 87 deletions
diff --git a/kernel/signal.c b/kernel/signal.c
index 688d6fbd2..dfc65edc6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -12,8 +12,9 @@
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
-#include <linux/param.h>
+#include <linux/highuid.h>
+#include <asm/param.h>
#include <asm/uaccess.h>
/*
@@ -132,54 +133,41 @@ printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
int reset = 1;
/* Collect the siginfo appropriate to this signal. */
- if (sig < SIGRTMIN) {
- /* XXX: As an extension, support queueing exactly
- one non-rt signal if SA_SIGINFO is set, so that
- we can get more detailed information about the
- cause of the signal. */
- /* Deciding not to init these couple of fields is
- more expensive that just initializing them. */
+ struct signal_queue *q, **pp;
+ pp = &current->sigqueue;
+ q = current->sigqueue;
+
+ /* Find the one we're interested in ... */
+ for ( ; q ; pp = &q->next, q = q->next)
+ if (q->info.si_signo == sig)
+ break;
+ if (q) {
+ if ((*pp = q->next) == NULL)
+ current->sigqueue_tail = pp;
+ *info = q->info;
+ kmem_cache_free(signal_queue_cachep,q);
+ atomic_dec(&nr_queued_signals);
+
+ /* then see if this signal is still pending. */
+ q = *pp;
+ while (q) {
+ if (q->info.si_signo == sig) {
+ reset = 0;
+ break;
+ }
+ q = q->next;
+ }
+ } else {
+ /* Ok, it wasn't in the queue.  It must have
+ been sent either by a non-rt mechanism, or
+ we ran out of queue space.  So zero out the
+ info. */
info->si_signo = sig;
info->si_errno = 0;
info->si_code = 0;
info->si_pid = 0;
info->si_uid = 0;
- } else {
- struct signal_queue *q, **pp;
- pp = &current->sigqueue;
- q = current->sigqueue;
-
- /* Find the one we're interested in ... */
- for ( ; q ; pp = &q->next, q = q->next)
- if (q->info.si_signo == sig)
- break;
- if (q) {
- if ((*pp = q->next) == NULL)
- current->sigqueue_tail = pp;
- *info = q->info;
- kmem_cache_free(signal_queue_cachep,q);
- atomic_dec(&nr_queued_signals);
-
- /* then see if this signal is still pending. */
- q = *pp;
- while (q) {
- if (q->info.si_signo == sig) {
- reset = 0;
- break;
- }
- q = q->next;
- }
- } else {
- /* Ok, it wasn't in the queue. It must have
- been sent either by a non-rt mechanism and
- we ran out of queue space. So zero out the
- info. */
- info->si_signo = sig;
- info->si_errno = 0;
- info->si_code = 0;
- info->si_pid = 0;
- info->si_uid = 0;
- }
+ SET_SIGINFO_UID16(info->si_uid16, 0);
}
if (reset)
@@ -253,6 +241,8 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
unsigned long flags;
int ret;
+ struct signal_queue *q = 0;
+
#if DEBUG_SIG
printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
@@ -305,42 +295,38 @@ printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
if (ignored_signal(sig, t))
goto out;
- if (sig < SIGRTMIN) {
- /* Non-real-time signals are not queued. */
- /* XXX: As an extension, support queueing exactly one
- non-rt signal if SA_SIGINFO is set, so that we can
- get more detailed information about the cause of
- the signal. */
- if (sigismember(&t->signal, sig))
- goto out;
- } else {
- /* Real-time signals must be queued if sent by sigqueue, or
- some other real-time mechanism. It is implementation
- defined whether kill() does so. We attempt to do so, on
- the principle of least surprise, but since kill is not
- allowed to fail with EAGAIN when low on memory we just
- make sure at least one signal gets delivered and don't
- pass on the info struct. */
-
- struct signal_queue *q = 0;
-
- if (atomic_read(&nr_queued_signals) < max_queued_signals) {
- q = (struct signal_queue *)
- kmem_cache_alloc(signal_queue_cachep, GFP_ATOMIC);
- }
-
- if (q) {
- atomic_inc(&nr_queued_signals);
- q->next = NULL;
- *t->sigqueue_tail = q;
- t->sigqueue_tail = &q->next;
- switch ((unsigned long) info) {
+ /* Support queueing exactly one non-rt signal, so that we
+ can get more detailed information about the cause of
+ the signal. */
+ if (sig < SIGRTMIN && sigismember(&t->signal, sig))
+ goto out;
+
+ /* Real-time signals must be queued if sent by sigqueue, or
+ some other real-time mechanism. It is implementation
+ defined whether kill() does so. We attempt to do so, on
+ the principle of least surprise, but since kill is not
+ allowed to fail with EAGAIN when low on memory we just
+ make sure at least one signal gets delivered and don't
+ pass on the info struct. */
+
+ if (atomic_read(&nr_queued_signals) < max_queued_signals) {
+ q = (struct signal_queue *)
+ kmem_cache_alloc(signal_queue_cachep, GFP_ATOMIC);
+ }
+
+ if (q) {
+ atomic_inc(&nr_queued_signals);
+ q->next = NULL;
+ *t->sigqueue_tail = q;
+ t->sigqueue_tail = &q->next;
+ switch ((unsigned long) info) {
case 0:
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_USER;
q->info.si_pid = current->pid;
q->info.si_uid = current->uid;
+ SET_SIGINFO_UID16(q->info.si_uid16, current->uid);
break;
case 1:
q->info.si_signo = sig;
@@ -348,16 +334,16 @@ printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
q->info.si_code = SI_KERNEL;
q->info.si_pid = 0;
q->info.si_uid = 0;
+ SET_SIGINFO_UID16(q->info.si_uid16, 0);
break;
default:
q->info = *info;
break;
- }
- } else {
- /* Queue overflow, we have to abort. */
- ret = -EAGAIN;
- goto out;
}
+ } else {
+ /* Queue overflow, we have to abort. */
+ ret = -EAGAIN;
+ goto out;
}
sigaddset(&t->signal, sig);
@@ -730,11 +716,13 @@ sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
if (copy_from_user(&these, uthese, sizeof(these)))
return -EFAULT;
- else {
- /* Invert the set of allowed signals to get those we
- want to block. */
- signotset(&these);
- }
+
+ /*
+ * Invert the set of allowed signals to get those we
+ * want to block.
+ */
+ sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
+ signotset(&these);
if (uts) {
if (copy_from_user(&ts, uts, sizeof(ts)))
@@ -796,6 +784,7 @@ sys_kill(int pid, int sig)
info.si_code = SI_USER;
info.si_pid = current->pid;
info.si_uid = current->uid;
+ SET_SIGINFO_UID16(info.si_uid16, current->uid);
return kill_something_info(sig, &info, pid);
}
@@ -869,7 +858,8 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
if (q->info.si_signo != sig)
pp = &q->next;
else {
- *pp = q->next;
+ if ((*pp = q->next) == NULL)
+ current->sigqueue_tail = pp;
kmem_cache_free(signal_queue_cachep, q);
atomic_dec(&nr_queued_signals);
}
@@ -916,10 +906,18 @@ do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
goto out;
error = -EINVAL;
- if (ss_flags & ~SS_DISABLE)
+ /*
+ * Note - this code used to test ss_flags incorrectly.
+ * Old code may have been written using ss_flags==0
+ * to mean ss_flags==SS_ONSTACK (as this was the only
+ * way that worked), so this fix preserves that older
+ * mechanism.
+ */
+ if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
goto out;
- if (ss_flags & SS_DISABLE) {
+ if (ss_flags == SS_DISABLE) {
ss_size = 0;
ss_sp = NULL;
} else {