Diffstat (limited to 'kernel/fork.c')
 -rw-r--r--  kernel/fork.c  |  89
 1 file changed, 64 insertions(+), 25 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 5c714fe73..a431ffe2b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -57,36 +57,54 @@ kmem_cache_t *uid_cachep;
#define uidhashfn(uid) (((uid >> 8) ^ uid) & (UIDHASH_SZ - 1))
+/*
+ * These routines must be called with the uidhash spinlock held!
+ */
static inline void uid_hash_insert(struct user_struct *up, unsigned int hashent)
{
- spin_lock(&uidhash_lock);
if((up->next = uidhash[hashent]) != NULL)
uidhash[hashent]->pprev = &up->next;
up->pprev = &uidhash[hashent];
uidhash[hashent] = up;
- spin_unlock(&uidhash_lock);
}
static inline void uid_hash_remove(struct user_struct *up)
{
- spin_lock(&uidhash_lock);
if(up->next)
up->next->pprev = up->pprev;
*up->pprev = up->next;
- spin_unlock(&uidhash_lock);
}
-static inline struct user_struct *uid_find(unsigned short uid, unsigned int hashent)
+static inline struct user_struct *uid_hash_find(unsigned short uid, unsigned int hashent)
{
- struct user_struct *up;
-
- spin_lock(&uidhash_lock);
- for(up = uidhash[hashent]; (up && up->uid != uid); up = up->next)
- ;
- spin_unlock(&uidhash_lock);
+ struct user_struct *up, *next;
+
+ next = uidhash[hashent];
+ for (;;) {
+ up = next;
+ if (next) {
+ next = up->next;
+ if (up->uid != uid)
+ continue;
+ atomic_inc(&up->count);
+ }
+ break;
+ }
return up;
}
+/*
+ * For SMP, we need to re-test the user struct counter
+ * after having acquired the spinlock. This allows us to do
+ * the common case (not freeing anything) without having
+ * any locking.
+ */
+#ifdef __SMP__
+ #define uid_hash_free(up) (!atomic_read(&(up)->count))
+#else
+ #define uid_hash_free(up) (1)
+#endif
+
void free_uid(struct task_struct *p)
{
struct user_struct *up = p->user;
@@ -94,8 +112,12 @@ void free_uid(struct task_struct *p)
if (up) {
p->user = NULL;
if (atomic_dec_and_test(&up->count)) {
- uid_hash_remove(up);
- kmem_cache_free(uid_cachep, up);
+ spin_lock(&uidhash_lock);
+ if (uid_hash_free(up)) {
+ uid_hash_remove(up);
+ kmem_cache_free(uid_cachep, up);
+ }
+ spin_unlock(&uidhash_lock);
}
}
}
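
The free_uid() hunk above illustrates the locking split this patch introduces: the reference drop (atomic_dec_and_test) runs without the uidhash spinlock, and only the rare last-reference case takes the lock and re-tests the count (uid_hash_free) before unhashing and freeing. Below is a minimal userspace sketch of the same pattern; the names (uobj, hash_lock, put_uobj) are made up for illustration, and C11 atomics plus a pthread mutex stand in for the kernel primitives.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct uobj {
	atomic_int count;
	struct uobj *next, **pprev;     /* same hash-chain layout as user_struct */
	unsigned int key;
};

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

static void hash_remove(struct uobj *o)    /* caller holds hash_lock */
{
	if (o->next)
		o->next->pprev = o->pprev;
	*o->pprev = o->next;
}

void put_uobj(struct uobj *o)
{
	/* Common case: drop a reference without touching the lock. */
	if (atomic_fetch_sub(&o->count, 1) != 1)
		return;

	/*
	 * The count reached zero.  Re-test it under the lock, because a
	 * concurrent lookup may have taken a new reference in between;
	 * this mirrors the uid_hash_free() re-test on SMP.
	 */
	pthread_mutex_lock(&hash_lock);
	if (atomic_load(&o->count) == 0) {
		hash_remove(o);
		free(o);
	}
	pthread_mutex_unlock(&hash_lock);
}

If the re-test finds the count non-zero, the object is simply left in the hash; whoever re-gained the reference drops it again later and repeats the same check.
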
@@ -103,20 +125,37 @@ void free_uid(struct task_struct *p)
int alloc_uid(struct task_struct *p)
{
unsigned int hashent = uidhashfn(p->uid);
- struct user_struct *up = uid_find(p->uid, hashent);
+ struct user_struct *up;
+
+ spin_lock(&uidhash_lock);
+ up = uid_hash_find(p->uid, hashent);
+ spin_unlock(&uidhash_lock);
- p->user = up;
if (!up) {
- up = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
- if (!up)
+ struct user_struct *new;
+
+ new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
+ if (!new)
return -EAGAIN;
- p->user = up;
- up->uid = p->uid;
- atomic_set(&up->count, 0);
- uid_hash_insert(up, hashent);
- }
+ new->uid = p->uid;
+ atomic_set(&new->count, 1);
- atomic_inc(&up->count);
+ /*
+ * Before adding this, check whether we raced
+ * on adding the same user already..
+ */
+ spin_lock(&uidhash_lock);
+ up = uid_hash_find(p->uid, hashent);
+ if (up) {
+ kmem_cache_free(uid_cachep, new);
+ } else {
+ uid_hash_insert(new, hashent);
+ up = new;
+ }
+ spin_unlock(&uidhash_lock);
+
+ }
+ p->user = up;
return 0;
}
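
The alloc_uid() hunk shows the companion pattern: look up under the lock, drop the lock to allocate (the allocation may sleep), then re-check for a racing insert before adding the new entry. Continuing the sketch above, with a hypothetical hash_find()/hash_insert() pair standing in for uid_hash_find()/uid_hash_insert():

static struct uobj *buckets[64];                  /* stand-in for uidhash[] */

static struct uobj *hash_find(unsigned int key)   /* caller holds hash_lock */
{
	struct uobj *o = buckets[key & 63];

	while (o && o->key != key)
		o = o->next;
	if (o)
		atomic_fetch_add(&o->count, 1);   /* reference taken under the lock */
	return o;
}

static void hash_insert(struct uobj *o)           /* caller holds hash_lock */
{
	struct uobj **head = &buckets[o->key & 63];

	if ((o->next = *head) != NULL)
		(*head)->pprev = &o->next;
	o->pprev = head;
	*head = o;
}

struct uobj *get_uobj(unsigned int key)
{
	struct uobj *o, *new;

	pthread_mutex_lock(&hash_lock);
	o = hash_find(key);
	pthread_mutex_unlock(&hash_lock);
	if (o)
		return o;

	new = calloc(1, sizeof(*new));            /* allocate outside the lock */
	if (!new)
		return NULL;
	new->key = key;
	atomic_init(&new->count, 1);

	/* Re-check: another thread may have inserted the same key meanwhile. */
	pthread_mutex_lock(&hash_lock);
	o = hash_find(key);
	if (o) {
		free(new);                        /* lost the race; use the winner's entry */
	} else {
		hash_insert(new);
		o = new;
	}
	pthread_mutex_unlock(&hash_lock);
	return o;
}

Freeing the speculative allocation on the lost-race path keeps each key unique in the hash, just as the patch does with kmem_cache_free(uid_cachep, new).
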
@@ -172,8 +211,8 @@ inside:
if(last_pid & 0xffff8000)
last_pid = 300;
next_safe = PID_MAX;
- goto repeat;
}
+ goto repeat;
}
if(p->pid > last_pid && next_safe > p->pid)
next_safe = p->pid;
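
The get_pid() hunk is a pure control-flow fix: moving goto repeat outside the inner if makes the task-list scan restart on every collision with the candidate pid, not only when the candidate had to wrap, since entries already scanned may clash with the newly bumped candidate. A rough standalone sketch of that retry-on-collision loop (hypothetical pick_id(), not the kernel function):

/* Pick an id not present in used[]; assumes at least one free id <= max_id. */
static int pick_id(const int *used, int n, int candidate, int max_id)
{
repeat:
	for (int i = 0; i < n; i++) {
		if (used[i] == candidate) {
			if (++candidate > max_id)
				candidate = 300;   /* wrap low, like last_pid = 300 */
			goto repeat;               /* restart the whole scan */
		}
	}
	return candidate;
}
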
@@ -510,6 +549,7 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
down(&current->mm->mmap_sem);
lock_kernel();
+ retval = -EAGAIN;
if (p->user) {
if (atomic_read(&p->user->count) >= p->rlim[RLIMIT_NPROC].rlim_cur)
goto bad_fork_free;
@@ -518,7 +558,6 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
{
struct task_struct **tslot;
tslot = find_empty_process();
- retval = -EAGAIN;
if (!tslot)
goto bad_fork_free;
p->tarray_ptr = tslot;
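
The last two hunks move retval = -EAGAIN above the new per-user RLIMIT_NPROC check, so that jumping to bad_fork_free from that check returns a defined error instead of whatever retval happened to hold. A small sketch of the idiom, under hypothetical names (create_resource() and reserve_slot() are stand-ins, not kernel functions):

#include <errno.h>

static int reserve_slot(void) { return 1; }   /* stand-in for find_empty_process() */

int create_resource(int in_use, int limit)
{
	int retval;

	retval = -EAGAIN;              /* set before the first goto that relies on it */
	if (in_use >= limit)
		goto bad_free;         /* over the per-user limit */

	if (!reserve_slot())
		goto bad_free;         /* table full: same error, no extra assignment */

	/* ... normal setup ... */
	return 0;

bad_free:
	/* ... undo any partial work ... */
	return retval;
}
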