author      Ralf Baechle <ralf@linux-mips.org>      2001-04-05 04:55:58 +0000
committer   Ralf Baechle <ralf@linux-mips.org>      2001-04-05 04:55:58 +0000
commit      74a9f2e1b4d3ab45a9f72cb5b556c9f521524ab3 (patch)
tree        7c4cdb103ab1b388c9852a88bd6fb1e73eba0b5c /kernel
parent      ee6374c8b0d333c08061c6a97bc77090d7461225 (diff)
Merge with Linux 2.4.3.
Note that mingetty no longer works with a serial console; you have to switch to another getty such as getty_ps. This commit also includes a fix for a setitimer bug that prevented getty_ps from working on older kernels.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/acct.c     4
-rw-r--r--  kernel/fork.c     6
-rw-r--r--  kernel/ksyms.c    1
-rw-r--r--  kernel/pm.c      62
-rw-r--r--  kernel/ptrace.c  15
-rw-r--r--  kernel/sched.c   34
-rw-r--r--  kernel/sys.c    120
7 files changed, 187 insertions(+), 55 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index a836dd1a6..3e21f9fce 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -315,13 +315,13 @@ static void do_acct_process(long exitcode, struct file *file)
vsize = 0;
if (current->mm) {
struct vm_area_struct *vma;
- down(&current->mm->mmap_sem);
+ down_read(&current->mm->mmap_sem);
vma = current->mm->mmap;
while (vma) {
vsize += vma->vm_end - vma->vm_start;
vma = vma->vm_next;
}
- up(&current->mm->mmap_sem);
+ up_read(&current->mm->mmap_sem);
}
vsize = vsize / 1024;
ac.ac_mem = encode_comp_t(vsize);
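
The acct.c hunk is part of converting mm->mmap_sem from a plain semaphore to a reader-writer semaphore: walking the VMA list only needs the read side, so concurrent readers no longer serialize against each other. A minimal sketch of that reader pattern, assuming the 2.4 rwsem primitives (down_read/up_read) and that the caller has already pinned the mm; the helper name is illustrative:

#include <linux/sched.h>
#include <linux/mm.h>

/* Sum the virtual size of a task's mappings under the read side of
 * mmap_sem; other readers may run in parallel, writers are excluded. */
static unsigned long vsize_kb(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long vsize = 0;

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                vsize += vma->vm_end - vma->vm_start;
        up_read(&mm->mmap_sem);

        return vsize / 1024;
}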
diff --git a/kernel/fork.c b/kernel/fork.c
index 6f0582cbf..c1d374a0a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -201,7 +201,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
{
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
- init_MUTEX(&mm->mmap_sem);
+ init_rwsem(&mm->mmap_sem);
mm->page_table_lock = SPIN_LOCK_UNLOCKED;
mm->pgd = pgd_alloc();
if (mm->pgd)
@@ -314,9 +314,9 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
if (!mm_init(mm))
goto fail_nomem;
- down(&oldmm->mmap_sem);
+ down_write(&oldmm->mmap_sem);
retval = dup_mmap(mm);
- up(&oldmm->mmap_sem);
+ up_write(&oldmm->mmap_sem);
/*
* Add it to the mmlist after the parent.
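
fork.c initialises the new mm's rwsem and takes the parent's mmap_sem for writing around dup_mmap(), so no mapping can appear or disappear in the parent while its VMA list is copied. A sketch of the write-side pairing, again assuming the 2.4 rwsem API; copy_mappings() and the way dup_mmap() is invoked are illustrative simplifications of copy_mm():

#include <linux/sched.h>
#include <linux/mm.h>

/* Illustrative condensation of copy_mm(): the parent mm is held for
 * write so its VMA list is stable while it is duplicated. */
static int copy_mappings(struct mm_struct *mm, struct mm_struct *oldmm)
{
        int retval;

        init_rwsem(&mm->mmap_sem);      /* fresh rwsem for the child mm */

        down_write(&oldmm->mmap_sem);   /* exclude mmap/munmap in the parent */
        retval = dup_mmap(mm);          /* copies current->mm (== oldmm) */
        up_write(&oldmm->mmap_sem);

        return retval;
}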
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index ca9001dbe..1848a573e 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -121,6 +121,7 @@ EXPORT_SYMBOL(deactivate_page);
EXPORT_SYMBOL(kmap_high);
EXPORT_SYMBOL(kunmap_high);
EXPORT_SYMBOL(highmem_start_page);
+EXPORT_SYMBOL(create_bounce);
#endif
/* filesystem internal functions */
diff --git a/kernel/pm.c b/kernel/pm.c
index eb7c6f615..a520cae15 100644
--- a/kernel/pm.c
+++ b/kernel/pm.c
@@ -25,7 +25,19 @@
int pm_active;
-static spinlock_t pm_devs_lock = SPIN_LOCK_UNLOCKED;
+/*
+ * Locking notes:
+ * pm_devs_lock can be a semaphore providing pm ops are not called
+ * from an interrupt handler (already a bad idea so no change here). Each
+ * change must be protected so that an unlink of an entry doesn't clash
+ * with a pm send - which is permitted to sleep in the current architecture
+ *
+ * Module unloads clashing with pm events now work out safely, the module
+ * unload path will block until the event has been sent. It may well block
+ * until a resume but that will be fine.
+ */
+
+static DECLARE_MUTEX(pm_devs_lock);
static LIST_HEAD(pm_devs);
/**
@@ -45,16 +57,14 @@ struct pm_dev *pm_register(pm_dev_t type,
{
struct pm_dev *dev = kmalloc(sizeof(struct pm_dev), GFP_KERNEL);
if (dev) {
- unsigned long flags;
-
memset(dev, 0, sizeof(*dev));
dev->type = type;
dev->id = id;
dev->callback = callback;
- spin_lock_irqsave(&pm_devs_lock, flags);
+ down(&pm_devs_lock);
list_add(&dev->entry, &pm_devs);
- spin_unlock_irqrestore(&pm_devs_lock, flags);
+ up(&pm_devs_lock);
}
return dev;
}
@@ -70,12 +80,18 @@ struct pm_dev *pm_register(pm_dev_t type,
void pm_unregister(struct pm_dev *dev)
{
if (dev) {
- unsigned long flags;
-
- spin_lock_irqsave(&pm_devs_lock, flags);
+ down(&pm_devs_lock);
list_del(&dev->entry);
- spin_unlock_irqrestore(&pm_devs_lock, flags);
+ up(&pm_devs_lock);
+
+ kfree(dev);
+ }
+}
+static void __pm_unregister(struct pm_dev *dev)
+{
+ if (dev) {
+ list_del(&dev->entry);
kfree(dev);
}
}
@@ -97,13 +113,15 @@ void pm_unregister_all(pm_callback callback)
if (!callback)
return;
+ down(&pm_devs_lock);
entry = pm_devs.next;
while (entry != &pm_devs) {
struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
entry = entry->next;
if (dev->callback == callback)
- pm_unregister(dev);
+ __pm_unregister(dev);
}
+ up(&pm_devs_lock);
}
/**
@@ -119,6 +137,13 @@ void pm_unregister_all(pm_callback callback)
*
* BUGS: what stops two power management requests occuring in parallel
* and conflicting.
+ *
+ * WARNING: Calling pm_send directly is not generally recommended, in
+ * particular there is no locking against the pm_dev going away. The
+ * caller must maintain all needed locking or have 'inside knowledge'
+ * on the safety. Also remember that this function is not locked against
+ * pm_unregister. This means that you must handle SMP races on callback
+ * execution and unload yourself.
*/
int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data)
@@ -183,6 +208,12 @@ static void pm_undo_all(struct pm_dev *last)
* during the processing of this request are restored to their
* previous state.
*
+ * WARNING: This function takes the pm_devs_lock. The lock is not dropped until
+ * the callbacks have completed. This prevents races against pm locking
+ * functions, races against module unload pm_unregister code. It does
+ * mean however that you must not issue pm_ functions within the callback
+ * or you will deadlock and users will hate you.
+ *
* Zero is returned on success. If a suspend fails then the status
* from the device that vetoes the suspend is returned.
*
@@ -192,7 +223,10 @@ static void pm_undo_all(struct pm_dev *last)
int pm_send_all(pm_request_t rqst, void *data)
{
- struct list_head *entry = pm_devs.next;
+ struct list_head *entry;
+
+ down(&pm_devs_lock);
+ entry = pm_devs.next;
while (entry != &pm_devs) {
struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
if (dev->callback) {
@@ -203,11 +237,13 @@ int pm_send_all(pm_request_t rqst, void *data)
*/
if (rqst == PM_SUSPEND)
pm_undo_all(dev);
+ up(&pm_devs_lock);
return status;
}
}
entry = entry->next;
}
+ up(&pm_devs_lock);
return 0;
}
@@ -222,6 +258,10 @@ int pm_send_all(pm_request_t rqst, void *data)
* of the list.
*
* To search from the beginning pass %NULL as the @from value.
+ *
+ * The caller MUST hold the pm_devs_lock lock when calling this
+ * function. The instant that the lock is dropped all pointers returned
+ * may become invalid.
*/
struct pm_dev *pm_find(pm_dev_t type, struct pm_dev *from)
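
The pm.c hunks swap the pm_devs spinlock for a semaphore precisely so the list can stay locked across callbacks that are allowed to sleep, which is why pm_register/pm_unregister now use down()/up() and pm_send_all holds the lock for the whole walk. A condensed sketch of that pattern under the 2.4 semaphore and list APIs; suspend_all() is an illustrative stand-in for pm_send_all(), and the PM_RESUME unwind on failure is elided:

#include <linux/pm.h>
#include <linux/list.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX(devs_lock);        /* sleeping lock, not a spinlock */
static LIST_HEAD(devs);

static void add_dev(struct pm_dev *dev)
{
        down(&devs_lock);               /* may sleep; never from IRQ context */
        list_add(&dev->entry, &devs);
        up(&devs_lock);
}

static int suspend_all(void)
{
        struct list_head *entry;
        int status = 0;

        down(&devs_lock);               /* held across sleeping callbacks */
        list_for_each(entry, &devs) {
                struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
                if (dev->callback &&
                    (status = dev->callback(dev, PM_SUSPEND, NULL)))
                        break;          /* real code unwinds with PM_RESUME */
        }
        up(&devs_lock);
        return status;
}

As the added WARNING comments note, a callback must not call back into the pm_ functions from inside this walk, or it will deadlock on pm_devs_lock.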
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 410f9de93..bb094496a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -28,6 +28,7 @@ static int access_one_page(struct mm_struct * mm, struct vm_area_struct * vma, u
struct page *page;
repeat:
+ spin_lock(&mm->page_table_lock);
pgdir = pgd_offset(vma->vm_mm, addr);
if (pgd_none(*pgdir))
goto fault_in_page;
@@ -47,9 +48,13 @@ repeat:
/* ZERO_PAGE is special: reads from it are ok even though it's marked reserved */
if (page != ZERO_PAGE(addr) || write) {
- if ((!VALID_PAGE(page)) || PageReserved(page))
+ if ((!VALID_PAGE(page)) || PageReserved(page)) {
+ spin_unlock(&mm->page_table_lock);
return 0;
+ }
}
+ get_page(page);
+ spin_unlock(&mm->page_table_lock);
flush_cache_page(vma, addr);
if (write) {
@@ -64,19 +69,23 @@ repeat:
flush_page_to_ram(page);
kunmap(page);
}
+ put_page(page);
return len;
fault_in_page:
+ spin_unlock(&mm->page_table_lock);
/* -1: out of memory. 0 - unmapped page */
if (handle_mm_fault(mm, vma, addr, write) > 0)
goto repeat;
return 0;
bad_pgd:
+ spin_unlock(&mm->page_table_lock);
pgd_ERROR(*pgdir);
return 0;
bad_pmd:
+ spin_unlock(&mm->page_table_lock);
pmd_ERROR(*pgmiddle);
return 0;
}
@@ -131,13 +140,13 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
if (!mm)
return 0;
- down(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
vma = find_extend_vma(mm, addr);
copied = 0;
if (vma)
copied = access_mm(mm, vma, addr, buf, len, write);
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
mmput(mm);
return copied;
}
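
The ptrace.c changes bracket the page-table walk with mm->page_table_lock and pin the located page with get_page() before the lock is dropped, so the page cannot be freed while its contents are copied. A reduced sketch of that lock-walk-pin sequence, assuming the 2.4 page-table helpers used in the hunk (pgd_offset/pmd_offset/pte_offset); pin_user_page() is illustrative and omits the fault-and-retry path:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Find the page backing addr and take a reference to it under the
 * page_table_lock; the caller put_page()s it when finished. */
static struct page *pin_user_page(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        struct page *page = NULL;

        spin_lock(&mm->page_table_lock);
        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                goto out;
        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                goto out;
        pte = pte_offset(pmd, addr);
        if (!pte_present(*pte))
                goto out;
        page = pte_page(*pte);
        if (!VALID_PAGE(page) || PageReserved(page)) {
                page = NULL;
                goto out;
        }
        get_page(page);                 /* pin before the lock is dropped */
out:
        spin_unlock(&mm->page_table_lock);
        return page;
}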
diff --git a/kernel/sched.c b/kernel/sched.c
index b6f6c879a..94f95eb4b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -82,6 +82,8 @@ struct task_struct * init_tasks[NR_CPUS] = {&init_task, };
*
* If both locks are to be concurrently held, the runqueue_lock
* nests inside the tasklist_lock.
+ *
+ * task->alloc_lock nests inside tasklist_lock.
*/
spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; /* inner */
rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
@@ -339,7 +341,7 @@ static inline int try_to_wake_up(struct task_struct * p, int synchronous)
if (task_on_runqueue(p))
goto out;
add_to_runqueue(p);
- if (!synchronous)
+ if (!synchronous || !(p->cpus_allowed & (1 << smp_processor_id())))
reschedule_idle(p);
success = 1;
out:
@@ -359,6 +361,32 @@ static void process_timeout(unsigned long __data)
wake_up_process(p);
}
+/**
+ * schedule_timeout - sleep until timeout
+ * @timeout: timeout value in jiffies
+ *
+ * Make the current task sleep until @timeout jiffies have
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
+ * pass before the routine returns. The routine will return 0
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task. In this case the remaining time
+ * in jiffies will be returned, or 0 if the timer expired in time
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
+ * the CPU away without a bound on the timeout. In this case the return
+ * value will be %MAX_SCHEDULE_TIMEOUT.
+ *
+ * In all cases the return value is guaranteed to be non-negative.
+ */
signed long schedule_timeout(signed long timeout)
{
struct timer_list timer;
@@ -473,7 +501,7 @@ needs_resched:
goto out_unlock;
spin_lock_irqsave(&runqueue_lock, flags);
- if (prev->state == TASK_RUNNING)
+ if ((prev->state == TASK_RUNNING) && !prev->has_cpu)
reschedule_idle(prev);
spin_unlock_irqrestore(&runqueue_lock, flags);
goto out_unlock;
@@ -541,7 +569,7 @@ move_rr_back:
}
default:
del_from_runqueue(prev);
- case TASK_RUNNING:
+ case TASK_RUNNING:;
}
prev->need_resched = 0;
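
The schedule_timeout() kerneldoc added in this hunk is easiest to read next to a caller. A usage sketch, assuming the HZ jiffies constant and the set_current_state() helper the comment refers to; snooze_one_second() is an illustrative name:

#include <linux/sched.h>

/* Sleep for roughly one second, but wake early on a signal.
 * Returns the jiffies that were left, or 0 if the timer expired. */
static signed long snooze_one_second(void)
{
        signed long left;

        set_current_state(TASK_INTERRUPTIBLE); /* must be set before calling */
        left = schedule_timeout(HZ);           /* HZ jiffies == one second */

        /* the task state is TASK_RUNNING again here, per the kerneldoc */
        return left;
}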
diff --git a/kernel/sys.c b/kernel/sys.c
index ded9c6328..bb2b0c8bf 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -330,6 +330,12 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void * arg)
return 0;
}
+static void deferred_cad(void *dummy)
+{
+ notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
+ machine_restart(NULL);
+}
+
/*
* This function gets called by ctrl-alt-del - ie the keyboard interrupt.
* As it's called within an interrupt, it may NOT sync: the only choice
@@ -337,10 +343,13 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void * arg)
*/
void ctrl_alt_del(void)
{
- if (C_A_D) {
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
- machine_restart(NULL);
- } else
+ static struct tq_struct cad_tq = {
+ routine: deferred_cad,
+ };
+
+ if (C_A_D)
+ schedule_task(&cad_tq);
+ else
kill_proc(1, SIGINT, 1);
}
@@ -367,12 +376,14 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
int old_rgid = current->gid;
int old_egid = current->egid;
+ int new_rgid = old_rgid;
+ int new_egid = old_egid;
if (rgid != (gid_t) -1) {
if ((old_rgid == rgid) ||
(current->egid==rgid) ||
capable(CAP_SETGID))
- current->gid = rgid;
+ new_rgid = rgid;
else
return -EPERM;
}
@@ -381,18 +392,22 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
(current->egid == egid) ||
(current->sgid == egid) ||
capable(CAP_SETGID))
- current->fsgid = current->egid = egid;
+ new_egid = egid;
else {
- current->gid = old_rgid;
return -EPERM;
}
}
+ if (new_egid != old_egid)
+ {
+ current->dumpable = 0;
+ wmb();
+ }
if (rgid != (gid_t) -1 ||
(egid != (gid_t) -1 && egid != old_rgid))
- current->sgid = current->egid;
- current->fsgid = current->egid;
- if (current->egid != old_egid)
- current->dumpable = 0;
+ current->sgid = new_egid;
+ current->fsgid = new_egid;
+ current->egid = new_egid;
+ current->gid = new_rgid;
return 0;
}
@@ -406,14 +421,25 @@ asmlinkage long sys_setgid(gid_t gid)
int old_egid = current->egid;
if (capable(CAP_SETGID))
+ {
+ if(old_egid != gid)
+ {
+ current->dumpable=0;
+ wmb();
+ }
current->gid = current->egid = current->sgid = current->fsgid = gid;
+ }
else if ((gid == current->gid) || (gid == current->sgid))
+ {
+ if(old_egid != gid)
+ {
+ current->dumpable=0;
+ wmb();
+ }
current->egid = current->fsgid = gid;
+ }
else
return -EPERM;
-
- if (current->egid != old_egid)
- current->dumpable = 0;
return 0;
}
@@ -463,7 +489,7 @@ extern inline void cap_emulate_setxuid(int old_ruid, int old_euid,
}
}
-static int set_user(uid_t new_ruid)
+static int set_user(uid_t new_ruid, int dumpclear)
{
struct user_struct *new_user, *old_user;
@@ -479,6 +505,11 @@ static int set_user(uid_t new_ruid)
atomic_dec(&old_user->processes);
atomic_inc(&new_user->processes);
+ if(dumpclear)
+ {
+ current->dumpable = 0;
+ wmb();
+ }
current->uid = new_ruid;
current->user = new_user;
free_uid(old_user);
@@ -525,16 +556,19 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
return -EPERM;
}
- if (new_ruid != old_ruid && set_user(new_ruid) < 0)
+ if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
return -EAGAIN;
+ if (new_euid != old_euid)
+ {
+ current->dumpable=0;
+ wmb();
+ }
current->fsuid = current->euid = new_euid;
if (ruid != (uid_t) -1 ||
(euid != (uid_t) -1 && euid != old_ruid))
current->suid = current->euid;
current->fsuid = current->euid;
- if (current->euid != old_euid)
- current->dumpable = 0;
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
cap_emulate_setxuid(old_ruid, old_euid, old_suid);
@@ -559,21 +593,26 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
asmlinkage long sys_setuid(uid_t uid)
{
int old_euid = current->euid;
- int old_ruid, old_suid, new_ruid;
+ int old_ruid, old_suid, new_ruid, new_suid;
old_ruid = new_ruid = current->uid;
old_suid = current->suid;
+ new_suid = old_suid;
+
if (capable(CAP_SETUID)) {
- if (uid != old_ruid && set_user(uid) < 0)
+ if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
return -EAGAIN;
- current->suid = uid;
- } else if ((uid != current->uid) && (uid != current->suid))
+ new_suid = uid;
+ } else if ((uid != current->uid) && (uid != new_suid))
return -EPERM;
- current->fsuid = current->euid = uid;
-
if (old_euid != uid)
+ {
current->dumpable = 0;
+ wmb();
+ }
+ current->fsuid = current->euid = uid;
+ current->suid = new_suid;
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
cap_emulate_setxuid(old_ruid, old_euid, old_suid);
@@ -605,12 +644,15 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
return -EPERM;
}
if (ruid != (uid_t) -1) {
- if (ruid != current->uid && set_user(ruid) < 0)
+ if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
return -EAGAIN;
}
if (euid != (uid_t) -1) {
if (euid != current->euid)
+ {
current->dumpable = 0;
+ wmb();
+ }
current->euid = euid;
current->fsuid = euid;
}
@@ -640,7 +682,7 @@ asmlinkage long sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid)
*/
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
- if (!capable(CAP_SETGID)) {
+ if (!capable(CAP_SETGID)) {
if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
(rgid != current->egid) && (rgid != current->sgid))
return -EPERM;
@@ -651,14 +693,17 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
(sgid != current->egid) && (sgid != current->sgid))
return -EPERM;
}
- if (rgid != (gid_t) -1)
- current->gid = rgid;
if (egid != (gid_t) -1) {
if (egid != current->egid)
+ {
current->dumpable = 0;
+ wmb();
+ }
current->egid = egid;
current->fsgid = egid;
}
+ if (rgid != (gid_t) -1)
+ current->gid = rgid;
if (sgid != (gid_t) -1)
current->sgid = sgid;
return 0;
@@ -690,9 +735,14 @@ asmlinkage long sys_setfsuid(uid_t uid)
if (uid == current->uid || uid == current->euid ||
uid == current->suid || uid == current->fsuid ||
capable(CAP_SETUID))
+ {
+ if (uid != old_fsuid)
+ {
+ current->dumpable = 0;
+ wmb();
+ }
current->fsuid = uid;
- if (current->fsuid != old_fsuid)
- current->dumpable = 0;
+ }
/* We emulate fsuid by essentially doing a scaled-down version
* of what we did in setresuid and friends. However, we only
@@ -727,10 +777,14 @@ asmlinkage long sys_setfsgid(gid_t gid)
if (gid == current->gid || gid == current->egid ||
gid == current->sgid || gid == current->fsgid ||
capable(CAP_SETGID))
+ {
+ if (gid != old_fsgid)
+ {
+ current->dumpable = 0;
+ wmb();
+ }
current->fsgid = gid;
- if (current->fsgid != old_fsgid)
- current->dumpable = 0;
-
+ }
return old_fsgid;
}
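
Nearly every sys.c hunk follows the same shape: when an id change would leave the process with credentials it did not start with, current->dumpable is cleared and a wmb() is issued before the new ids are written. A compressed sketch of that ordering, using the 2.4 task fields touched in the hunks; install_egid() is illustrative:

#include <linux/sched.h>
#include <asm/system.h>

/* Illustrative: install a new effective gid the way the setgid paths
 * above do -- dumpable is cleared and ordered before the ids change. */
static void install_egid(gid_t egid)
{
        if (current->egid != egid) {
                current->dumpable = 0;  /* no core dumps with changed creds */
                wmb();                  /* flag visible before the new ids */
        }
        current->egid = egid;
        current->fsgid = egid;
}

The wmb() matters on SMP: another CPU that observes the new ids must also observe dumpable == 0.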