author    Ralf Baechle <ralf@linux-mips.org>  2000-08-28 22:00:09 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-08-28 22:00:09 +0000
commit    1a1d77dd589de5a567fa95e36aa6999c704ceca4
tree      141e31f89f18b9fe0831f31852e0435ceaccafc5 /kernel/fork.c
parent    fb9c690a18b3d66925a65b17441c37fa14d4370b
Merge with 2.4.0-test7.
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c  77
1 file changed, 61 insertions, 16 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 641de8b22..4ab0976b1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -32,9 +32,6 @@ int max_threads;
 unsigned long total_forks;	/* Handle normal Linux uptimes. */
 int last_pid;
 
-/* SLAB cache for mm_struct's. */
-kmem_cache_t *mm_cachep;
-
 struct task_struct *pidhash[PIDHASH_SZ];
 
 void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
@@ -314,18 +311,19 @@ static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 	tsk->mm = mm;
 	tsk->active_mm = mm;
 
-	/*
-	 * child gets a private LDT (if there was an LDT in the parent)
-	 */
-	copy_segments(tsk, mm);
-
 	down(&current->mm->mmap_sem);
 	retval = dup_mmap(mm);
 	up(&current->mm->mmap_sem);
 	if (retval)
 		goto free_pt;
 
-	init_new_context(tsk,mm);
+	/*
+	 * child gets a private LDT (if there was an LDT in the parent)
+	 */
+	copy_segments(tsk, mm);
+
+	if (init_new_context(tsk,mm))
+		goto free_pt;
 
 good_mm:
 	tsk->mm = mm;
@@ -340,7 +338,7 @@ fail_nomem:
 
 static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
 {
-	struct fs_struct *fs = kmalloc(sizeof(*old), GFP_KERNEL);
+	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
 	/* We don't need to lock fs - think why ;-) */
 	if (fs) {
 		atomic_set(&fs->count, 1);
@@ -506,7 +504,7 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
 		atomic_inc(&current->sig->count);
 		return 0;
 	}
-	tsk->sig = kmalloc(sizeof(*tsk->sig), GFP_KERNEL);
+	tsk->sig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
 	if (!tsk->sig)
 		return -1;
 	spin_lock_init(&tsk->sig->siglock);
@@ -553,8 +551,6 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
 
 	*p = *current;
 
-	lock_kernel();
-
 	retval = -EAGAIN;
 	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur)
 		goto bad_fork_free;
@@ -671,11 +667,12 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
 	nr_threads++;
 	write_unlock_irq(&tasklist_lock);
 
+	if (p->ptrace & PT_PTRACED)
+		send_sig(SIGSTOP, p, 1);
+
 	wake_up_process(p);		/* do this last */
 	++total_forks;
 
-bad_fork:
-	unlock_kernel();
 fork_out:
 	if ((clone_flags & CLONE_VFORK) && (retval > 0))
 		down(&sem);
@@ -696,5 +693,53 @@ bad_fork_cleanup_count:
 	free_uid(p->user);
 bad_fork_free:
 	free_task_struct(p);
-	goto bad_fork;
-}
+	goto fork_out;
+}
+
+/* SLAB cache for signal_struct structures (tsk->sig) */
+kmem_cache_t *sigact_cachep;
+
+/* SLAB cache for files_struct structures (tsk->files) */
+kmem_cache_t *files_cachep;
+
+/* SLAB cache for fs_struct structures (tsk->fs) */
+kmem_cache_t *fs_cachep;
+
+/* SLAB cache for vm_area_struct structures */
+kmem_cache_t *vm_area_cachep;
+
+/* SLAB cache for mm_struct structures (tsk->mm) */
+kmem_cache_t *mm_cachep;
+
+void __init proc_caches_init(void)
+{
+	sigact_cachep = kmem_cache_create("signal_act",
+			sizeof(struct signal_struct), 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!sigact_cachep)
+		panic("Cannot create signal action SLAB cache");
+
+	files_cachep = kmem_cache_create("files_cache",
+			sizeof(struct files_struct), 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!files_cachep)
+		panic("Cannot create files SLAB cache");
+
+	fs_cachep = kmem_cache_create("fs_cache",
+			sizeof(struct fs_struct), 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!fs_cachep)
+		panic("Cannot create fs_struct SLAB cache");
+
+	vm_area_cachep = kmem_cache_create("vm_area_struct",
+			sizeof(struct vm_area_struct), 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if(!vm_area_cachep)
+		panic("vma_init: Cannot alloc vm_area_struct SLAB cache");
+
+	mm_cachep = kmem_cache_create("mm_struct",
+			sizeof(struct mm_struct), 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if(!mm_cachep)
+		panic("vma_init: Cannot alloc mm_struct SLAB cache");
+}
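
The recurring change in this patch is the move from kmalloc()/kfree() to dedicated slab caches for frequently allocated per-process structures (fs_struct, signal_struct), with the caches created once at boot in proc_caches_init(). Below is a minimal, self-contained sketch of that pattern against the 2.4-era slab API; foo_struct, foo_cachep, foo_caches_init, foo_alloc and foo_free are illustrative names, not part of this commit.

/*
 * Sketch of the slab-cache pattern this patch applies to fs_struct and
 * signal_struct (2.4-era slab API).  The "foo" names are hypothetical;
 * kmem_cache_create/kmem_cache_alloc/kmem_cache_free, SLAB_HWCACHE_ALIGN,
 * GFP_KERNEL and panic() are the real kernel interfaces of that vintage.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct foo_struct {
	atomic_t count;		/* reference count, as in fs_struct */
	int data;
};

/* One cache per object type, created once at boot. */
static kmem_cache_t *foo_cachep;

void __init foo_caches_init(void)
{
	/* Same flags as the patch: cacheline-aligned objects, no ctor/dtor. */
	foo_cachep = kmem_cache_create("foo_cache",
			sizeof(struct foo_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!foo_cachep)
		panic("Cannot create foo SLAB cache");
}

struct foo_struct *foo_alloc(void)
{
	/* Replaces kmalloc(sizeof(*foo), GFP_KERNEL). */
	struct foo_struct *foo = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (foo)
		atomic_set(&foo->count, 1);
	return foo;
}

void foo_free(struct foo_struct *foo)
{
	/* Objects go back to their own cache, not to kfree(). */
	if (atomic_dec_and_test(&foo->count))
		kmem_cache_free(foo_cachep, foo);
}

A per-type cache lets the allocator pack same-sized objects tightly and recycle them cheaply, which is why the patch switches __copy_fs_struct() and copy_sighand() over to kmem_cache_alloc() and consolidates the cache declarations (including the previously file-local mm_cachep) at the bottom of fork.c.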