summaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-08-28 22:00:09 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-08-28 22:00:09 +0000
commit1a1d77dd589de5a567fa95e36aa6999c704ceca4 (patch)
tree141e31f89f18b9fe0831f31852e0435ceaccafc5 /kernel
parentfb9c690a18b3d66925a65b17441c37fa14d4370b (diff)
Merge with 2.4.0-test7.
Diffstat (limited to 'kernel')
-rw-r--r--kernel/exit.c4
-rw-r--r--kernel/fork.c77
-rw-r--r--kernel/ksyms.c1
-rw-r--r--kernel/sched.c3
-rw-r--r--kernel/signal.c105
-rw-r--r--kernel/timer.c6
6 files changed, 147 insertions, 49 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 3e95fe878..74d2cf5f1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -229,7 +229,7 @@ static inline void __put_fs_struct(struct fs_struct *fs)
dput(fs->altroot);
mntput(fs->altrootmnt);
}
- kfree(fs);
+ kmem_cache_free(fs_cachep, fs);
}
}
@@ -264,7 +264,7 @@ static inline void __exit_sighand(struct task_struct *tsk)
tsk->sig = NULL;
spin_unlock_irq(&tsk->sigmask_lock);
if (atomic_dec_and_test(&sig->count))
- kfree(sig);
+ kmem_cache_free(sigact_cachep, sig);
}
flush_signals(tsk);
diff --git a/kernel/fork.c b/kernel/fork.c
index 641de8b22..4ab0976b1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -32,9 +32,6 @@ int max_threads;
unsigned long total_forks; /* Handle normal Linux uptimes. */
int last_pid;
-/* SLAB cache for mm_struct's. */
-kmem_cache_t *mm_cachep;
-
struct task_struct *pidhash[PIDHASH_SZ];
void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
@@ -314,18 +311,19 @@ static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
tsk->mm = mm;
tsk->active_mm = mm;
- /*
- * child gets a private LDT (if there was an LDT in the parent)
- */
- copy_segments(tsk, mm);
-
down(&current->mm->mmap_sem);
retval = dup_mmap(mm);
up(&current->mm->mmap_sem);
if (retval)
goto free_pt;
- init_new_context(tsk,mm);
+ /*
+ * child gets a private LDT (if there was an LDT in the parent)
+ */
+ copy_segments(tsk, mm);
+
+ if (init_new_context(tsk,mm))
+ goto free_pt;
good_mm:
tsk->mm = mm;
@@ -340,7 +338,7 @@ fail_nomem:
static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
- struct fs_struct *fs = kmalloc(sizeof(*old), GFP_KERNEL);
+ struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
/* We don't need to lock fs - think why ;-) */
if (fs) {
atomic_set(&fs->count, 1);
@@ -506,7 +504,7 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
atomic_inc(&current->sig->count);
return 0;
}
- tsk->sig = kmalloc(sizeof(*tsk->sig), GFP_KERNEL);
+ tsk->sig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
if (!tsk->sig)
return -1;
spin_lock_init(&tsk->sig->siglock);
@@ -553,8 +551,6 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
*p = *current;
- lock_kernel();
-
retval = -EAGAIN;
if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur)
goto bad_fork_free;
@@ -671,11 +667,12 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
nr_threads++;
write_unlock_irq(&tasklist_lock);
+ if (p->ptrace & PT_PTRACED)
+ send_sig(SIGSTOP, p, 1);
+
wake_up_process(p); /* do this last */
++total_forks;
-bad_fork:
- unlock_kernel();
fork_out:
if ((clone_flags & CLONE_VFORK) && (retval > 0))
down(&sem);
@@ -696,5 +693,53 @@ bad_fork_cleanup_count:
free_uid(p->user);
bad_fork_free:
free_task_struct(p);
- goto bad_fork;
+ goto fork_out;
+}
+
+/* SLAB cache for signal_struct structures (tsk->sig) */
+kmem_cache_t *sigact_cachep;
+
+/* SLAB cache for files_struct structures (tsk->files) */
+kmem_cache_t *files_cachep;
+
+/* SLAB cache for fs_struct structures (tsk->fs) */
+kmem_cache_t *fs_cachep;
+
+/* SLAB cache for vm_area_struct structures */
+kmem_cache_t *vm_area_cachep;
+
+/* SLAB cache for mm_struct structures (tsk->mm) */
+kmem_cache_t *mm_cachep;
+
+void __init proc_caches_init(void)
+{
+ sigact_cachep = kmem_cache_create("signal_act",
+ sizeof(struct signal_struct), 0,
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if (!sigact_cachep)
+ panic("Cannot create signal action SLAB cache");
+
+ files_cachep = kmem_cache_create("files_cache",
+ sizeof(struct files_struct), 0,
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if (!files_cachep)
+ panic("Cannot create files SLAB cache");
+
+ fs_cachep = kmem_cache_create("fs_cache",
+ sizeof(struct fs_struct), 0,
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if (!fs_cachep)
+ panic("Cannot create fs_struct SLAB cache");
+
+ vm_area_cachep = kmem_cache_create("vm_area_struct",
+ sizeof(struct vm_area_struct), 0,
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if(!vm_area_cachep)
+ panic("vma_init: Cannot alloc vm_area_struct SLAB cache");
+
+ mm_cachep = kmem_cache_create("mm_struct",
+ sizeof(struct mm_struct), 0,
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if(!mm_cachep)
+ panic("vma_init: Cannot alloc mm_struct SLAB cache");
}
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index bcc6d2e31..660d492f6 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -527,6 +527,7 @@ EXPORT_SYMBOL(get_fast_time);
/* library functions */
EXPORT_SYMBOL(strnicmp);
EXPORT_SYMBOL(strspn);
+EXPORT_SYMBOL(strsep);
/* software interrupts */
EXPORT_SYMBOL(tasklet_hi_vec);
diff --git a/kernel/sched.c b/kernel/sched.c
index 688b30c7b..fda8b5eea 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -247,7 +247,7 @@ static inline void reschedule_idle(struct task_struct * p, unsigned long flags)
* one will have the least active cache context.) Also find
* the executing process which has the least priority.
*/
- oldest_idle = -1ULL;
+ oldest_idle = (cycles_t) -1;
target_tsk = NULL;
max_prio = 1;
@@ -454,7 +454,6 @@ signed long schedule_timeout(signed long timeout)
*/
static inline void __schedule_tail(struct task_struct *prev)
{
- current->need_resched |= prev->need_resched;
#ifdef CONFIG_SMP
if ((prev->state == TASK_RUNNING) &&
(prev != idle_task(smp_processor_id()))) {
diff --git a/kernel/signal.c b/kernel/signal.c
index 4e73949da..b64225778 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -46,6 +46,42 @@ void __init signals_init(void)
}
+/* Given the mask, find the first available signal that should be serviced. */
+
+static int
+next_signal(sigset_t *signal, sigset_t *mask)
+{
+ unsigned long i, *s, *m, x;
+ int sig = 0;
+
+ s = signal->sig;
+ m = mask->sig;
+ switch (_NSIG_WORDS) {
+ default:
+ for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
+ if ((x = *s &~ *m) != 0) {
+ sig = ffz(~x) + i*_NSIG_BPW + 1;
+ break;
+ }
+ break;
+
+ case 2: if ((x = s[0] &~ m[0]) != 0)
+ sig = 1;
+ else if ((x = s[1] &~ m[1]) != 0)
+ sig = _NSIG_BPW + 1;
+ else
+ break;
+ sig += ffz(~x);
+ break;
+
+ case 1: if ((x = *s &~ *m) != 0)
+ sig = ffz(~x) + 1;
+ break;
+ }
+
+ return sig;
+}
+
/*
* Flush all pending signals for a task.
*/
@@ -87,6 +123,32 @@ flush_signal_handlers(struct task_struct *t)
}
}
+/* Notify the system that a driver wants to block all signals for this
+ * process, and wants to be notified if any signals at all were to be
+ * sent/acted upon. If the notifier routine returns non-zero, then the
+ * signal will be acted upon after all. If the notifier routine returns 0,
+ * then the signal will be blocked. Only one block per process is
+ * allowed. priv is a pointer to private data that the notifier routine
+ * can use to determine if the signal should be blocked or not. */
+
+void
+block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
+{
+ current->notifier_mask = mask;
+ current->notifier_data = priv;
+ current->notifier = notifier;
+}
+
+/* Notify the system that blocking has ended. */
+
+void
+unblock_all_signals(void)
+{
+ current->notifier = NULL;
+ current->notifier_data = NULL;
+ recalc_sigpending(current);
+}
+
/*
* Dequeue a signal and return the element to the caller, which is
* expected to free it.
@@ -97,7 +159,6 @@ flush_signal_handlers(struct task_struct *t)
int
dequeue_signal(sigset_t *mask, siginfo_t *info)
{
- unsigned long i, *s, *m, x;
int sig = 0;
#if DEBUG_SIG
@@ -105,30 +166,22 @@ printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
signal_pending(current));
#endif
- /* Find the first desired signal that is pending. */
- s = current->signal.sig;
- m = mask->sig;
- switch (_NSIG_WORDS) {
- default:
- for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
- if ((x = *s &~ *m) != 0) {
- sig = ffz(~x) + i*_NSIG_BPW + 1;
- break;
- }
- break;
-
- case 2: if ((x = s[0] &~ m[0]) != 0)
- sig = 1;
- else if ((x = s[1] &~ m[1]) != 0)
- sig = _NSIG_BPW + 1;
- else
- break;
- sig += ffz(~x);
- break;
-
- case 1: if ((x = *s &~ *m) != 0)
- sig = ffz(~x) + 1;
- break;
+ sig = next_signal(&current->signal, mask);
+ if (current->notifier) {
+ sigset_t merged;
+ int i;
+ int altsig;
+
+ for (i = 0; i < _NSIG_WORDS; i++)
+ merged.sig[i] = mask->sig[i]
+ | current->notifier_mask->sig[i];
+ altsig = next_signal(&current->signal, &merged);
+ if (sig != altsig) {
+ if (!(current->notifier)(current->notifier_data)) {
+ current->sigpending = 0;
+ return 0;
+ }
+ }
}
if (sig) {
@@ -658,6 +711,8 @@ EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
+EXPORT_SYMBOL(block_all_signals);
+EXPORT_SYMBOL(unblock_all_signals);
/*
diff --git a/kernel/timer.c b/kernel/timer.c
index 6b8538715..00ab398b4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -180,17 +180,15 @@ void add_timer(struct timer_list *timer)
unsigned long flags;
spin_lock_irqsave(&timerlist_lock, flags);
- if (timer->list.next)
+ if (timer_pending(timer))
goto bug;
internal_add_timer(timer);
-out:
spin_unlock_irqrestore(&timerlist_lock, flags);
return;
-
bug:
+ spin_unlock_irqrestore(&timerlist_lock, flags);
printk("bug: kernel timer added twice at %p.\n",
__builtin_return_address(0));
- goto out;
}
static inline int detach_timer (struct timer_list *timer)