summary refs log tree commit diff stats
path: root/kernel/fork.c
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>1997-09-12 01:29:55 +0000
committerRalf Baechle <ralf@linux-mips.org>1997-09-12 01:29:55 +0000
commit545f435ebcfd94a1e7c20b46efe81b4d6ac4e698 (patch)
treee9ce4bc598d06374bda906f18365984bf22a526a /kernel/fork.c
parent4291a610eef89d0d5c69d9a10ee6560e1aa36c74 (diff)
Merge with Linux 2.1.55. More bugfixes and goodies from my private
CVS archive.
Diffstat (limited to 'kernel/fork.c')
-rw-r--r-- kernel/fork.c | 111
1 file changed, 77 insertions, 34 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 900d6015c..4974d5dac 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -206,6 +206,7 @@ inside:
static inline int dup_mmap(struct mm_struct * mm)
{
struct vm_area_struct * mpnt, *tmp, **pprev;
+ int retval;
mm->mmap = mm->mmap_cache = NULL;
flush_cache_mm(current->mm);
@@ -213,19 +214,17 @@ static inline int dup_mmap(struct mm_struct * mm)
for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
struct dentry *dentry;
+ retval = -ENOMEM;
tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (!tmp) {
- exit_mmap(mm);
- flush_tlb_mm(current->mm);
- return -ENOMEM;
- }
+ if (!tmp)
+ goto fail_nomem;
*tmp = *mpnt;
tmp->vm_flags &= ~VM_LOCKED;
tmp->vm_mm = mm;
tmp->vm_next = NULL;
dentry = tmp->vm_dentry;
if (dentry) {
- dentry->d_count++;
+ dget(dentry);
if (tmp->vm_flags & VM_DENYWRITE)
dentry->d_inode->i_writecount--;
@@ -236,59 +235,103 @@ static inline int dup_mmap(struct mm_struct * mm)
mpnt->vm_next_share = tmp;
tmp->vm_pprev_share = &mpnt->vm_next_share;
}
- if (copy_page_range(mm, current->mm, tmp)) {
- exit_mmap(mm);
- flush_tlb_mm(current->mm);
- return -ENOMEM;
- }
- if (tmp->vm_ops && tmp->vm_ops->open)
+
+ /* Copy the pages, but defer checking for errors */
+ retval = copy_page_range(mm, current->mm, tmp);
+ if (!retval && tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
- /* Ok, finally safe to link it in. */
+ /*
+ * Link in the new vma even if an error occurred,
+ * so that exit_mmap() can clean up the mess.
+ */
if((tmp->vm_next = *pprev) != NULL)
(*pprev)->vm_pprev = &tmp->vm_next;
*pprev = tmp;
tmp->vm_pprev = pprev;
pprev = &tmp->vm_next;
+ if (retval)
+ goto fail_nomem;
}
flush_tlb_mm(current->mm);
return 0;
+
+fail_nomem:
+ flush_tlb_mm(current->mm);
+ return retval;
}
-static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
+/*
+ * Allocate and initialize an mm_struct.
+ */
+struct mm_struct * mm_alloc(void)
{
- if (!(clone_flags & CLONE_VM)) {
- struct mm_struct * mm = kmem_cache_alloc(mm_cachep, SLAB_KERNEL);
- if (!mm)
- return -1;
+ struct mm_struct * mm;
+
+ mm = kmem_cache_alloc(mm_cachep, SLAB_KERNEL);
+ if (mm) {
*mm = *current->mm;
init_new_context(mm);
mm->count = 1;
mm->def_flags = 0;
+ mm->mmap_sem = MUTEX;
+ mm->pgd = NULL;
+ mm->mmap = mm->mmap_cache = NULL;
/* It has not run yet, so cannot be present in anyone's
* cache or tlb.
*/
mm->cpu_vm_mask = 0;
+ }
+ return mm;
+}
- tsk->mm = mm;
- tsk->min_flt = tsk->maj_flt = 0;
- tsk->cmin_flt = tsk->cmaj_flt = 0;
- tsk->nswap = tsk->cnswap = 0;
- if (new_page_tables(tsk))
- goto free_mm;
- if (dup_mmap(mm)) {
- free_page_tables(mm);
-free_mm:
- kmem_cache_free(mm_cachep, mm);
- return -1;
- }
+/*
+ * Decrement the use count and release all resources for an mm.
+ */
+void mmput(struct mm_struct *mm)
+{
+ if (!--mm->count) {
+ exit_mmap(mm);
+ free_page_tables(mm);
+ kmem_cache_free(mm_cachep, mm);
+ }
+}
+
+static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
+{
+ struct mm_struct * mm;
+ int retval;
+
+ if (clone_flags & CLONE_VM) {
+ mmget(current->mm);
+ SET_PAGE_DIR(tsk, current->mm->pgd);
return 0;
}
- current->mm->count++;
- SET_PAGE_DIR(tsk, current->mm->pgd);
+
+ retval = -ENOMEM;
+ mm = mm_alloc();
+ if (!mm)
+ goto fail_nomem;
+
+ tsk->mm = mm;
+ tsk->min_flt = tsk->maj_flt = 0;
+ tsk->cmin_flt = tsk->cmaj_flt = 0;
+ tsk->nswap = tsk->cnswap = 0;
+ retval = new_page_tables(tsk);
+ if (retval)
+ goto free_mm;
+ retval = dup_mmap(mm);
+ if (retval)
+ goto free_mm;
return 0;
+
+free_mm:
+ tsk->mm = NULL;
+ mmput(mm);
+fail_nomem:
+ return retval;
}
static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
@@ -477,9 +520,9 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
bad_fork_cleanup_sighand:
exit_sighand(p);
bad_fork_cleanup_fs:
- exit_fs(p);
+ exit_fs(p); /* blocking */
bad_fork_cleanup_files:
- exit_files(p);
+ exit_files(p); /* blocking */
bad_fork_cleanup:
charge_uid(current, -1);
if (p->exec_domain && p->exec_domain->module)