author     Ralf Baechle <ralf@linux-mips.org>    2000-08-08 12:37:17 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    2000-08-08 12:37:17 +0000
commit     9aa9eb41942b918f385ccabd2efdd6e7e4232165 (patch)
tree       20bec7da036d31ec185dfc1dcc00753c7ac9b170 /kernel
parent     87075e049581f880f01eb0b41aa6ac807b299e35 (diff)
Merge with Linux 2.4.0-test6-pre1.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile  |   2
-rw-r--r--  kernel/exit.c    |   3
-rw-r--r--  kernel/fork.c    | 143
-rw-r--r--  kernel/sys.c     |  96
-rw-r--r--  kernel/user.c    | 154
5 files changed, 218 insertions, 180 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 53606a359..a13812119 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,7 +10,7 @@
O_TARGET := kernel.o
O_OBJS = sched.o dma.o fork.o exec_domain.o panic.o printk.o \
module.o exit.o itimer.o info.o time.o softirq.o resource.o \
- sysctl.o acct.o capability.o ptrace.o timer.o
+ sysctl.o acct.o capability.o ptrace.o timer.o user.o
OX_OBJS += signal.o sys.o
diff --git a/kernel/exit.c b/kernel/exit.c
index 89dcd1a3b..993ba31f3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -38,7 +38,8 @@ void release(struct task_struct * p)
spin_unlock_irq(&runqueue_lock);
} while (has_cpu);
#endif
- free_uid(p);
+ atomic_dec(&p->user->processes);
+ free_uid(p->user);
unhash_process(p);
release_thread(p);
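
The exit.c change above reflects the new split inside user_struct between the structure's lifetime reference count (__count, released through free_uid()) and the per-user process count (processes, decremented directly). A minimal sketch of the exit-path accounting, using a hypothetical helper name but only the fields shown in this diff:

/*
 * Sketch only: the accounting a task performs on exit after this change.
 * The helper name is hypothetical; the fields come from the diff above.
 */
static void uncharge_exiting_task(struct task_struct *p)
{
	atomic_dec(&p->user->processes);	/* one fewer process charged to this uid */
	free_uid(p->user);			/* drop the task's struct reference; may
						   free the entry once nothing uses it */
}
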
diff --git a/kernel/fork.c b/kernel/fork.c
index 683f047ad..594ee79f3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -37,125 +37,6 @@ kmem_cache_t *mm_cachep;
struct task_struct *pidhash[PIDHASH_SZ];
-/* UID task count cache, to prevent walking entire process list every
- * single fork() operation.
- */
-#define UIDHASH_SZ (PIDHASH_SZ >> 2)
-
-static struct user_struct {
- atomic_t count;
- struct user_struct *next, **pprev;
- unsigned int uid;
-} *uidhash[UIDHASH_SZ];
-
-spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
-
-kmem_cache_t *uid_cachep;
-
-#define uidhashfn(uid) (((uid >> 8) ^ uid) & (UIDHASH_SZ - 1))
-
-/*
- * These routines must be called with the uidhash spinlock held!
- */
-static inline void uid_hash_insert(struct user_struct *up, unsigned int hashent)
-{
- if((up->next = uidhash[hashent]) != NULL)
- uidhash[hashent]->pprev = &up->next;
- up->pprev = &uidhash[hashent];
- uidhash[hashent] = up;
-}
-
-static inline void uid_hash_remove(struct user_struct *up)
-{
- if(up->next)
- up->next->pprev = up->pprev;
- *up->pprev = up->next;
-}
-
-static inline struct user_struct *uid_hash_find(unsigned short uid, unsigned int hashent)
-{
- struct user_struct *up, *next;
-
- next = uidhash[hashent];
- for (;;) {
- up = next;
- if (next) {
- next = up->next;
- if (up->uid != uid)
- continue;
- atomic_inc(&up->count);
- }
- break;
- }
- return up;
-}
-
-/*
- * For SMP, we need to re-test the user struct counter
- * after having aquired the spinlock. This allows us to do
- * the common case (not freeing anything) without having
- * any locking.
- */
-#ifdef CONFIG_SMP
- #define uid_hash_free(up) (!atomic_read(&(up)->count))
-#else
- #define uid_hash_free(up) (1)
-#endif
-
-void free_uid(struct task_struct *p)
-{
- struct user_struct *up = p->user;
-
- if (up) {
- p->user = NULL;
- if (atomic_dec_and_test(&up->count)) {
- spin_lock(&uidhash_lock);
- if (uid_hash_free(up)) {
- uid_hash_remove(up);
- kmem_cache_free(uid_cachep, up);
- }
- spin_unlock(&uidhash_lock);
- }
- }
-}
-
-int alloc_uid(struct task_struct *p)
-{
- unsigned int hashent = uidhashfn(p->uid);
- struct user_struct *up;
-
- spin_lock(&uidhash_lock);
- up = uid_hash_find(p->uid, hashent);
- spin_unlock(&uidhash_lock);
-
- if (!up) {
- struct user_struct *new;
-
- new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
- if (!new)
- return -EAGAIN;
- new->uid = p->uid;
- atomic_set(&new->count, 1);
-
- /*
- * Before adding this, check whether we raced
- * on adding the same user already..
- */
- spin_lock(&uidhash_lock);
- up = uid_hash_find(p->uid, hashent);
- if (up) {
- kmem_cache_free(uid_cachep, new);
- } else {
- uid_hash_insert(new, hashent);
- up = new;
- }
- spin_unlock(&uidhash_lock);
-
- }
- p->user = up;
- return 0;
-}
-
void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
unsigned long flags;
@@ -185,17 +66,6 @@ void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
void __init fork_init(unsigned long mempages)
{
- int i;
-
- uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
- 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
- if(!uid_cachep)
- panic("Cannot create uid taskcount SLAB cache\n");
-
- for(i = 0; i < UIDHASH_SZ; i++)
- uidhash[i] = 0;
-
/*
* The default maximum number of threads is set to a safe
* value: the thread structures can take up at most half
@@ -664,11 +534,10 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
lock_kernel();
retval = -EAGAIN;
- if (p->user) {
- if (atomic_read(&p->user->count) >= p->rlim[RLIMIT_NPROC].rlim_cur)
- goto bad_fork_free;
- atomic_inc(&p->user->count);
- }
+ if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur)
+ goto bad_fork_free;
+ atomic_inc(&p->user->__count);
+ atomic_inc(&p->user->processes);
/*
* Counter increases are protected by
@@ -801,8 +670,8 @@ bad_fork_cleanup:
if (p->binfmt && p->binfmt->module)
__MOD_DEC_USE_COUNT(p->binfmt->module);
bad_fork_cleanup_count:
- if (p->user)
- free_uid(p);
+ atomic_dec(&p->user->processes);
+ free_uid(p->user);
bad_fork_free:
free_task_struct(p);
goto bad_fork;
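
In do_fork() the per-user process counter now backs the RLIMIT_NPROC check directly, and a successful fork takes both a structure reference and a process charge; the bad_fork_cleanup_count path undoes exactly that pair. A condensed sketch of the fork-side accounting, assuming p->user is always valid after this change (uid 0 tasks point at root_user), with the helper name invented for illustration:

/* Sketch: the fork-path accounting introduced by this diff. */
static int charge_forked_task(struct task_struct *p)
{
	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur)
		return -EAGAIN;			/* would exceed the per-user limit */
	atomic_inc(&p->user->__count);		/* the child holds its own reference */
	atomic_inc(&p->user->processes);	/* and counts toward RLIMIT_NPROC */
	return 0;
}
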
diff --git a/kernel/sys.c b/kernel/sys.c
index 2e6b84fa7..0f3181f70 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -466,6 +466,28 @@ extern inline void cap_emulate_setxuid(int old_ruid, int old_euid,
}
}
+static int set_user(uid_t new_ruid)
+{
+ struct user_struct *new_user, *old_user;
+
+ /* What if a process setreuid()'s and this brings the
+ * new uid over his NPROC rlimit? We can check this now
+ * cheaply with the new uid cache, so if it matters
+ * we should be checking for it. -DaveM
+ */
+ new_user = alloc_uid(new_ruid);
+ if (!new_user)
+ return -EAGAIN;
+ old_user = current->user;
+ atomic_dec(&old_user->processes);
+ atomic_inc(&new_user->processes);
+
+ current->uid = new_ruid;
+ current->user = new_user;
+ free_uid(old_user);
+ return 0;
+}
+
/*
* Unprivileged users may change the real uid to the effective uid
* or vice versa. (BSD-style)
@@ -483,28 +505,33 @@ extern inline void cap_emulate_setxuid(int old_ruid, int old_euid,
*/
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
- int old_ruid, old_euid, old_suid, new_ruid;
+ int old_ruid, old_euid, old_suid, new_ruid, new_euid;
new_ruid = old_ruid = current->uid;
- old_euid = current->euid;
+ new_euid = old_euid = current->euid;
old_suid = current->suid;
+
if (ruid != (uid_t) -1) {
- if ((old_ruid == ruid) ||
- (current->euid==ruid) ||
- capable(CAP_SETUID))
- new_ruid = ruid;
- else
+ new_ruid = ruid;
+ if ((old_ruid != ruid) &&
+ (current->euid != ruid) &&
+ !capable(CAP_SETUID))
return -EPERM;
}
+
if (euid != (uid_t) -1) {
- if ((old_ruid == euid) ||
- (current->euid == euid) ||
- (current->suid == euid) ||
- capable(CAP_SETUID))
- current->fsuid = current->euid = euid;
- else
+ new_euid = euid;
+ if ((old_ruid != euid) &&
+ (current->euid != euid) &&
+ (current->suid != euid) &&
+ !capable(CAP_SETUID))
return -EPERM;
}
+
+ if (new_ruid != old_ruid && set_user(new_ruid) < 0)
+ return -EAGAIN;
+
+ current->fsuid = current->euid = new_euid;
if (ruid != (uid_t) -1 ||
(euid != (uid_t) -1 && euid != old_ruid))
current->suid = current->euid;
@@ -512,17 +539,6 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
if (current->euid != old_euid)
current->dumpable = 0;
- if(new_ruid != old_ruid) {
- /* What if a process setreuid()'s and this brings the
- * new uid over his NPROC rlimit? We can check this now
- * cheaply with the new uid cache, so if it matters
- * we should be checking for it. -DaveM
- */
- free_uid(current);
- current->uid = new_ruid;
- alloc_uid(current);
- }
-
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
cap_emulate_setxuid(old_ruid, old_euid, old_suid);
}
@@ -550,22 +566,22 @@ asmlinkage long sys_setuid(uid_t uid)
old_ruid = new_ruid = current->uid;
old_suid = current->suid;
- if (capable(CAP_SETUID))
- new_ruid = current->euid = current->suid = current->fsuid = uid;
- else if ((uid == current->uid) || (uid == current->suid))
- current->fsuid = current->euid = uid;
- else
+ if (capable(CAP_SETUID)) {
+ if (uid != old_ruid && set_user(uid) < 0)
+ return -EAGAIN;
+ current->suid = uid;
+ } else if (uid == current->uid) {
+ /* Nothing - just set fsuid/euid */
+ } else if (uid == current->suid) {
+ if (set_user(uid) < 0)
+ return -EAGAIN;
+ } else
return -EPERM;
- if (current->euid != old_euid)
- current->dumpable = 0;
+ current->fsuid = current->euid = uid;
- if (new_ruid != old_ruid) {
- /* See comment above about NPROC rlimit issues... */
- free_uid(current);
- current->uid = new_ruid;
- alloc_uid(current);
- }
+ if (old_euid != uid)
+ current->dumpable = 0;
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
cap_emulate_setxuid(old_ruid, old_euid, old_suid);
@@ -597,10 +613,8 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
return -EPERM;
}
if (ruid != (uid_t) -1) {
- /* See above commentary about NPROC rlimit issues here. */
- free_uid(current);
- current->uid = ruid;
- alloc_uid(current);
+ if (ruid != current->uid && set_user(ruid) < 0)
+ return -EAGAIN;
}
if (euid != (uid_t) -1) {
if (euid != current->euid)
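
The new set_user() helper centralizes what sys_setreuid(), sys_setuid() and sys_setresuid() previously open-coded. One property worth noting is the ordering: the new user_struct is looked up and referenced before anything on the task is changed, so a failed allocation leaves the caller's credentials untouched, whereas the old free_uid()/alloc_uid() sequence modified current first. A minimal sketch of that hand-over, mirroring set_user() above with explanatory comments (the helper name here is illustrative):

/* Sketch of the uid hand-over performed by set_user() in this diff. */
static int switch_user_struct(uid_t new_ruid)
{
	struct user_struct *new_user, *old_user;

	new_user = alloc_uid(new_ruid);		/* find or create; takes a __count ref */
	if (!new_user)
		return -EAGAIN;			/* current has not been touched */

	old_user = current->user;
	atomic_dec(&old_user->processes);	/* process no longer charged to the old uid... */
	atomic_inc(&new_user->processes);	/* ...but to the new one */

	current->uid  = new_ruid;
	current->user = new_user;
	free_uid(old_user);			/* drop the reference to the old entry */
	return 0;
}
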
diff --git a/kernel/user.c b/kernel/user.c
new file mode 100644
index 000000000..d9f96da0a
--- /dev/null
+++ b/kernel/user.c
@@ -0,0 +1,154 @@
+/*
+ * The "user cache".
+ *
+ * (C) Copyright 1991-2000 Linus Torvalds
+ *
+ * We have a per-user structure to keep track of how many
+ * processes, files etc the user has claimed, in order to be
+ * able to have per-user limits for system resources.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+/*
+ * UID task count cache, to get fast user lookup in "alloc_uid"
+ * when changing user ID's (ie setuid() and friends).
+ */
+#define UIDHASH_SZ (256)
+
+static struct user_struct *uidhash[UIDHASH_SZ];
+
+spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
+
+struct user_struct root_user = {
+ __count: ATOMIC_INIT(1),
+ processes: ATOMIC_INIT(1),
+ files: ATOMIC_INIT(0)
+};
+
+static kmem_cache_t *uid_cachep;
+
+#define uidhashfn(uid) (((uid >> 8) ^ uid) & (UIDHASH_SZ - 1))
+
+/*
+ * These routines must be called with the uidhash spinlock held!
+ */
+static inline void uid_hash_insert(struct user_struct *up, unsigned int hashent)
+{
+ if((up->next = uidhash[hashent]) != NULL)
+ uidhash[hashent]->pprev = &up->next;
+ up->pprev = &uidhash[hashent];
+ uidhash[hashent] = up;
+}
+
+static inline void uid_hash_remove(struct user_struct *up)
+{
+ if(up->next)
+ up->next->pprev = up->pprev;
+ *up->pprev = up->next;
+}
+
+static inline struct user_struct *uid_hash_find(unsigned short uid, unsigned int hashent)
+{
+ struct user_struct *up, *next;
+
+ next = uidhash[hashent];
+ for (;;) {
+ up = next;
+ if (next) {
+ next = up->next;
+ if (up->uid != uid)
+ continue;
+ atomic_inc(&up->__count);
+ }
+ break;
+ }
+ return up;
+}
+
+/*
+ * For SMP, we need to re-test the user struct counter
+ * after having aquired the spinlock. This allows us to do
+ * the common case (not freeing anything) without having
+ * any locking.
+ */
+#ifdef CONFIG_SMP
+ #define uid_hash_free(up) (!atomic_read(&(up)->__count))
+#else
+ #define uid_hash_free(up) (1)
+#endif
+
+void free_uid(struct user_struct *up)
+{
+ if (up) {
+ if (atomic_dec_and_test(&up->__count)) {
+ spin_lock(&uidhash_lock);
+ if (uid_hash_free(up)) {
+ uid_hash_remove(up);
+ kmem_cache_free(uid_cachep, up);
+ }
+ spin_unlock(&uidhash_lock);
+ }
+ }
+}
+
+struct user_struct * alloc_uid(uid_t uid)
+{
+ unsigned int hashent = uidhashfn(uid);
+ struct user_struct *up;
+
+ spin_lock(&uidhash_lock);
+ up = uid_hash_find(uid, hashent);
+ spin_unlock(&uidhash_lock);
+
+ if (!up) {
+ struct user_struct *new;
+
+ new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
+ if (!new)
+ return NULL;
+ new->uid = uid;
+ atomic_set(&new->__count, 1);
+ atomic_set(&new->processes, 0);
+ atomic_set(&new->files, 0);
+
+ /*
+ * Before adding this, check whether we raced
+ * on adding the same user already..
+ */
+ spin_lock(&uidhash_lock);
+ up = uid_hash_find(uid, hashent);
+ if (up) {
+ kmem_cache_free(uid_cachep, new);
+ } else {
+ uid_hash_insert(new, hashent);
+ up = new;
+ }
+ spin_unlock(&uidhash_lock);
+
+ }
+ return up;
+}
+
+
+static int __init uid_cache_init(void)
+{
+ int i;
+
+ uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
+ 0,
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if(!uid_cachep)
+ panic("Cannot create uid taskcount SLAB cache\n");
+
+ for(i = 0; i < UIDHASH_SZ; i++)
+ uidhash[i] = 0;
+
+ /* Insert the root user immediately - init already runs with this */
+ uid_hash_insert(&root_user, uidhashfn(0));
+ return 0;
+}
+
+module_init(uid_cache_init);
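
The lockless fast path in free_uid() above depends on the re-test done by uid_hash_free() on SMP: the reference count is dropped without holding uidhash_lock, so a concurrent alloc_uid() may revive the entry before the lock is taken. A commented sketch of the interleaving that re-test guards against (illustration only, no additional kernel code):

/*
 * Illustration of the SMP interleaving that uid_hash_free() re-checks for
 * (comments only; the code involved is free_uid() and alloc_uid() above).
 *
 *   CPU A: free_uid(up)                      CPU B: alloc_uid(uid)
 *   ---------------------                    ---------------------
 *   atomic_dec_and_test(&up->__count)
 *     count reaches 0, so A heads for
 *     the freeing path
 *                                            spin_lock(&uidhash_lock);
 *                                            uid_hash_find() locates the same
 *                                              entry (it is still hashed) and
 *                                              does atomic_inc(&up->__count)
 *                                            spin_unlock(&uidhash_lock);
 *   spin_lock(&uidhash_lock);
 *   uid_hash_free(up) re-reads __count,
 *     sees it is nonzero again, so the
 *     entry is neither unhashed nor freed
 *   spin_unlock(&uidhash_lock);
 */
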