Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index bf44bd04c..7534f1d91 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -249,7 +249,7 @@ static inline int dup_mmap(struct mm_struct * mm)
 		tmp->vm_next = NULL;
 		file = tmp->vm_file;
 		if (file) {
-			file->f_count++;
+			atomic_inc(&file->f_count);
 			if (tmp->vm_flags & VM_DENYWRITE)
 				file->f_dentry->d_inode->i_writecount--;
@@ -474,6 +474,7 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
 	if (!new_fds)
 		goto out_release;
+	newf->file_lock = RW_LOCK_UNLOCKED;
 	atomic_set(&newf->count, 1);
 	newf->max_fds = NR_OPEN;
 	newf->fd = new_fds;
@@ -485,7 +486,7 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
 		struct file *f = *old_fds++;
 		*new_fds = f;
 		if (f)
-			f->f_count++;
+			atomic_inc(&f->f_count);
 		new_fds++;
 	}
 	/* This is long word aligned thus could use a optimized version */
@@ -556,13 +557,14 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
 	if (p->user) {
 		if (atomic_read(&p->user->count) >= p->rlim[RLIMIT_NPROC].rlim_cur)
 			goto bad_fork_free;
+		atomic_inc(&p->user->count);
 	}
 	{
 		struct task_struct **tslot;
 		tslot = find_empty_process();
 		if (!tslot)
-			goto bad_fork_free;
+			goto bad_fork_cleanup_count;
 		p->tarray_ptr = tslot;
 		*tslot = p;
 		nr = tslot - &task[0];
@@ -666,8 +668,6 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
 	write_unlock_irq(&tasklist_lock);
 	nr_tasks++;
-	if (p->user)
-		atomic_inc(&p->user->count);
 	p->next_run = NULL;
 	p->prev_run = NULL;
@@ -695,6 +695,9 @@ bad_fork_cleanup:
 		__MOD_DEC_USE_COUNT(p->binfmt->module);
 	add_free_taskslot(p->tarray_ptr);
+bad_fork_cleanup_count:
+	if (p->user)
+		free_uid(p);
bad_fork_free:
 	free_task_struct(p);
 	goto bad_fork;
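
The two f_count hunks follow the same pattern: a bare `count++` on a shared reference count is a load/modify/store sequence that can lose updates once several CPUs can reach the same struct file concurrently, so the patch switches it to atomic_inc() on an atomic_t. A minimal userspace sketch of the difference, using C11 atomics as a stand-in for the kernel's atomic_t (the thread and iteration counts are arbitrary demo values, not anything from the patch):

/*
 * Illustrative userspace sketch (not kernel code): why a plain "count++"
 * on a shared counter is replaced by an atomic increment.  Build with
 * -pthread; plain_count usually ends up below 2000000, atomic_count never does.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int plain_count;                 /* racy, like the old f_count++ */
static atomic_int atomic_count;         /* safe, like atomic_inc(&f_count) */

static void *grab_refs(void *unused)
{
	for (int i = 0; i < 1000000; i++) {
		plain_count++;                          /* load/add/store: increments can be lost */
		atomic_fetch_add(&atomic_count, 1);     /* one indivisible read-modify-write */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, grab_refs, NULL);
	pthread_create(&b, NULL, grab_refs, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("plain=%d atomic=%d\n", plain_count, atomic_count);
	return 0;
}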
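The do_fork hunks move the p->user->count increment up to the point where the RLIMIT_NPROC check is made, instead of taking it after the task slot has been claimed. Once the reference is taken that early, a later failure in find_empty_process() must drop it again, which is what the new bad_fork_cleanup_count label does before falling through to bad_fork_free. A minimal sketch of that goto-based unwind idiom, with made-up resources and helpers purely for illustration (malloc/free stand in for the uid reference and task slot; none of these names come from the kernel):

/*
 * Illustrative sketch of goto-based error unwinding (hypothetical helpers,
 * not kernel APIs): each failure jumps to the label that undoes exactly
 * what has been acquired so far, releasing resources in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *ref = NULL, *slot = NULL;
	int err = -1;

	ref = malloc(16);              /* step 1: take a reference             */
	if (!ref)
		goto out;
	slot = malloc(16);             /* step 2: claim a slot                 */
	if (!slot)
		goto out_drop_ref;     /* step 2 failed: undo step 1 only      */

	free(slot);                    /* success path (demo only)             */
	free(ref);
	return 0;

out_drop_ref:
	free(ref);                     /* plays the role of free_uid() above   */
out:
	return err;
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}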