author		Ralf Baechle <ralf@linux-mips.org>	2000-03-19 01:28:40 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	2000-03-19 01:28:40 +0000
commit		8abb719409c9060a7c0676f76e9182c1e0b8ca46 (patch)
tree		b88cc5a6cd513a04a512b7e6215c873c90a1c5dd /kernel
parent		f01bd7aeafd95a08aafc9e3636bb26974df69d82 (diff)
Merge with 2.3.99-pre1.
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/exec_domain.c	34
-rw-r--r--	kernel/exit.c		7
-rw-r--r--	kernel/fork.c		10
-rw-r--r--	kernel/module.c		5
-rw-r--r--	kernel/pm.c		83
5 files changed, 105 insertions, 34 deletions
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 1ee1eee4d..111a3d69c 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -23,7 +23,7 @@ struct exec_domain default_exec_domain = {
};
static struct exec_domain *exec_domains = &default_exec_domain;
-
+static spinlock_t exec_domains_lock = SPIN_LOCK_UNLOCKED;
static asmlinkage void no_lcall7(int segment, struct pt_regs * regs)
{
@@ -32,15 +32,10 @@ static asmlinkage void no_lcall7(int segment, struct pt_regs * regs)
* personality set incorrectly. Check to see whether SVr4 is available,
* and use it, otherwise give the user a SEGV.
*/
- if (current->exec_domain && current->exec_domain->module)
- __MOD_DEC_USE_COUNT(current->exec_domain->module);
-
+ put_exec_domain(current->exec_domain);
current->personality = PER_SVR4;
current->exec_domain = lookup_exec_domain(current->personality);
- if (current->exec_domain && current->exec_domain->module)
- __MOD_INC_USE_COUNT(current->exec_domain->module);
-
if (current->exec_domain && current->exec_domain->handler
&& current->exec_domain->handler != no_lcall7) {
current->exec_domain->handler(segment, regs);
@@ -55,10 +50,15 @@ struct exec_domain *lookup_exec_domain(unsigned long personality)
unsigned long pers = personality & PER_MASK;
struct exec_domain *it;
+ spin_lock(&exec_domains_lock);
for (it=exec_domains; it; it=it->next)
- if (pers >= it->pers_low
- && pers <= it->pers_high)
+ if (pers >= it->pers_low && pers <= it->pers_high) {
+ if (!try_inc_mod_count(it->module))
+ continue;
+ spin_unlock(&exec_domains_lock);
return it;
+ }
+ spin_unlock(&exec_domains_lock);
/* Should never get this far. */
printk(KERN_ERR "No execution domain for personality 0x%02lx\n", pers);
@@ -73,11 +73,15 @@ int register_exec_domain(struct exec_domain *it)
return -EINVAL;
if (it->next)
return -EBUSY;
+ spin_lock(&exec_domains_lock);
for (tmp=exec_domains; tmp; tmp=tmp->next)
- if (tmp == it)
+ if (tmp == it) {
+ spin_unlock(&exec_domains_lock);
return -EBUSY;
+ }
it->next = exec_domains;
exec_domains = it;
+ spin_unlock(&exec_domains_lock);
return 0;
}
@@ -86,14 +90,17 @@ int unregister_exec_domain(struct exec_domain *it)
struct exec_domain ** tmp;
tmp = &exec_domains;
+ spin_lock(&exec_domains_lock);
while (*tmp) {
if (it == *tmp) {
*tmp = it->next;
it->next = NULL;
+ spin_unlock(&exec_domains_lock);
return 0;
}
tmp = &(*tmp)->next;
}
+ spin_unlock(&exec_domains_lock);
return -EINVAL;
}
@@ -113,12 +120,9 @@ asmlinkage long sys_personality(unsigned long personality)
goto out;
old_personality = current->personality;
- if (current->exec_domain && current->exec_domain->module)
- __MOD_DEC_USE_COUNT(current->exec_domain->module);
+ put_exec_domain(current->exec_domain);
current->personality = personality;
current->exec_domain = it;
- if (current->exec_domain->module)
- __MOD_INC_USE_COUNT(current->exec_domain->module);
ret = old_personality;
out:
unlock_kernel();
@@ -130,9 +134,11 @@ int get_exec_domain_list(char * page)
int len = 0;
struct exec_domain * e;
+ spin_lock(&exec_domains_lock);
for (e=exec_domains; e && len < PAGE_SIZE - 80; e=e->next)
len += sprintf(page+len, "%d-%d\t%-16s\t[%s]\n",
e->pers_low, e->pers_high, e->name,
e->module ? e->module->name : "kernel");
+ spin_unlock(&exec_domains_lock);
return len;
}
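The exec_domain.c changes above replace the open-coded __MOD_DEC_USE_COUNT calls with put_exec_domain(), and lookup_exec_domain() now takes the module reference itself via try_inc_mod_count() under the new exec_domains_lock. The helper is defined outside this diff; a minimal sketch of its assumed shape (not part of the patch) is:

	/* Sketch only: drop the module reference that lookup_exec_domain()
	 * took via try_inc_mod_count(). The real definition lives in a
	 * header that is not part of this diff. */
	static inline void put_exec_domain(struct exec_domain *ep)
	{
		if (ep && ep->module)
			__MOD_DEC_USE_COUNT(ep->module);
	}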
diff --git a/kernel/exit.c b/kernel/exit.c
index 333981ab9..4684b8c37 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -55,8 +55,8 @@ void release(struct task_struct * p)
* was given away by the parent in the first place.)
*/
current->counter += p->counter;
- if (current->counter > current->priority)
- current->counter = current->priority;
+ if (current->counter >= current->priority*2)
+ current->counter = current->priority*2-1;
free_task_struct(p);
} else {
printk("task releasing itself\n");
@@ -425,8 +425,7 @@ fake_volatile:
tsk->exit_code = code;
exit_notify();
task_unlock(tsk);
- if (tsk->exec_domain && tsk->exec_domain->module)
- __MOD_DEC_USE_COUNT(tsk->exec_domain->module);
+ put_exec_domain(tsk->exec_domain);
if (tsk->binfmt && tsk->binfmt->module)
__MOD_DEC_USE_COUNT(tsk->binfmt->module);
schedule();
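In the release() hunk above, the parent still reclaims the child's unused timeslice, but the merged counter is now clamped just below twice the parent's priority instead of at the priority itself. A numeric sketch with assumed values (illustrative only, not taken from this patch):

	/* Illustrative only: assumed tick counts. */
	int priority = 20;		/* parent's priority		   */
	int counter  = 15 + 30;		/* parent's + child's leftover	   */
	if (counter >= priority * 2)
		counter = priority * 2 - 1;	/* now capped at 39; the old code capped at 20 */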
diff --git a/kernel/fork.c b/kernel/fork.c
index f30adb908..2fbb08e32 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -647,8 +647,11 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
p->run_list.next = NULL;
p->run_list.prev = NULL;
- if ((clone_flags & CLONE_VFORK) || !(clone_flags & CLONE_PARENT))
- p->p_pptr = p->p_opptr = current;
+ if ((clone_flags & CLONE_VFORK) || !(clone_flags & CLONE_PARENT)) {
+ p->p_opptr = current;
+ if (!(current->flags & PF_PTRACED))
+ p->p_pptr = current;
+ }
p->p_cptr = NULL;
init_waitqueue_head(&p->wait_chldexit);
p->vfork_sem = NULL;
@@ -748,8 +751,7 @@ bad_fork_cleanup_fs:
bad_fork_cleanup_files:
exit_files(p); /* blocking */
bad_fork_cleanup:
- if (p->exec_domain && p->exec_domain->module)
- __MOD_DEC_USE_COUNT(p->exec_domain->module);
+ put_exec_domain(p->exec_domain);
if (p->binfmt && p->binfmt->module)
__MOD_DEC_USE_COUNT(p->binfmt->module);
bad_fork_cleanup_count:
diff --git a/kernel/module.c b/kernel/module.c
index 83b000342..a25596c51 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1040,4 +1040,9 @@ sys_get_kernel_syms(struct kernel_sym *table)
return -ENOSYS;
}
+int try_inc_mod_count(struct module *mod)
+{
+ return 1;
+}
+
#endif /* CONFIG_MODULES */
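The try_inc_mod_count() stub above covers the !CONFIG_MODULES build, where there is nothing to pin, so the call always succeeds. With modules enabled, the real implementation (not shown in this hunk) succeeds only while the module is still live, which is what makes the new lookup pattern safe. A sketch of that caller pattern, assuming the object carries a struct module pointer:

	/* Sketch: pin the module before using the object, skip it if the
	 * module is already being unloaded, drop the reference when done. */
	if (!try_inc_mod_count(obj->module))
		return NULL;
	/* ... use obj safely here ... */
	if (obj->module)
		__MOD_DEC_USE_COUNT(obj->module);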
diff --git a/kernel/pm.c b/kernel/pm.c
index b2f60d65f..369d9b954 100644
--- a/kernel/pm.c
+++ b/kernel/pm.c
@@ -28,9 +28,17 @@ int pm_active = 0;
static spinlock_t pm_devs_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(pm_devs);
-/*
- * Register a device with power management
+/**
+ * pm_register - register a device with power management
+ * @type: The device type
+ * @id: Device ID
+ * @callback: Callback function
+ *
+ * Add a device to the list of devices that wish to be notified about
+ * power management events. A pm_dev structure is returned on success,
+ * and NULL is returned on failure.
*/
+
struct pm_dev *pm_register(pm_dev_t type,
unsigned long id,
pm_callback callback)
@@ -51,9 +59,14 @@ struct pm_dev *pm_register(pm_dev_t type,
return dev;
}
-/*
- * Unregister a device with power management
+/**
+ * pm_unregister - unregister a device with power management
+ * @dev: device to unregister
+ *
+ * Remove a device from the power management notification lists. The
+ * dev passed must be a handle previously returned by pm_register.
*/
+
void pm_unregister(struct pm_dev *dev)
{
if (dev) {
@@ -67,9 +80,16 @@ void pm_unregister(struct pm_dev *dev)
}
}
-/*
- * Unregister all devices with matching callback
+/**
+ * pm_unregister_all - unregister all devices with matching callback
+ * @callback: callback function pointer
+ *
+ * Unregister every device that would call the callback passed. This
+ * is primarily meant as a helper function for loadable modules. It
+ * enables a module to give up all its managed devices without keeping
+ * its own private list.
*/
+
void pm_unregister_all(pm_callback callback)
{
struct list_head *entry;
@@ -86,9 +106,21 @@ void pm_unregister_all(pm_callback callback)
}
}
-/*
- * Send request to a single device
+/**
+ * pm_send - send request to a single device
+ * @dev: device to send to
+ * @rqst: power management request
+ * @data: data for the callback
+ *
+ * Issue a power management request to a given device. The
+ * PM_SUSPEND and PM_RESUME events are handled specially. The
+ * data field must hold the intended next state. No call is made
+ * if the state matches.
+ *
+ * BUGS: what stops two power management requests from occurring in
+ * parallel and conflicting?
*/
+
int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data)
{
int status = 0;
@@ -138,9 +170,26 @@ static void pm_undo_all(struct pm_dev *last)
}
}
-/*
- * Send a request to all devices
+/**
+ * pm_send_all - send request to all managed devices
+ * @rqst: power management request
+ * @data: data for the callback
+ *
+ * Issue a power management request to all devices. The
+ * PM_SUSPEND events are handled specially. Any device is
+ * permitted to fail a suspend by returning a non-zero (error)
+ * value from its callback function. If any device vetoes a
+ * suspend request then all other devices that have suspended
+ * during the processing of this request are restored to their
+ * previous state.
+ *
+ * Zero is returned on success. If a suspend fails then the status
+ * from the device that vetoes the suspend is returned.
+ *
+ * BUGS: what stops two power management requests from occurring in
+ * parallel and conflicting?
*/
+
int pm_send_all(pm_request_t rqst, void *data)
{
struct list_head *entry = pm_devs.next;
@@ -162,9 +211,19 @@ int pm_send_all(pm_request_t rqst, void *data)
return 0;
}
-/*
- * Find a device
+/**
+ * pm_find - find a device
+ * @type: type of device
+ * @from: Where to start looking
+ *
+ * Scan the power management list for devices of a specific type. The
+ * return value for a matching device may be passed to further calls
+ * to this function to find further matches. A NULL indicates the end
+ * of the list.
+ *
+ * To search from the beginning, pass NULL as the from value.
*/
+
struct pm_dev *pm_find(pm_dev_t type, struct pm_dev *from)
{
struct list_head *entry = from ? from->entry.next:pm_devs.next;
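The kerneldoc added above describes the pm_register()/pm_unregister() interface from a driver's point of view. A hedged usage sketch follows; the driver name and callback body are hypothetical, and only the pm_* calls, types, and PM_* constants come from the API documented in this file:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/pm.h>

	static struct pm_dev *mydev_pm;		/* hypothetical handle */

	/* Called by the PM core via pm_send()/pm_send_all(). Returning a
	 * non-zero value for PM_SUSPEND vetoes the suspend. */
	static int mydev_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
	{
		switch (rqst) {
		case PM_SUSPEND:
			/* save state and stop the hardware */
			break;
		case PM_RESUME:
			/* reprogram the hardware */
			break;
		default:
			break;
		}
		return 0;
	}

	static int __init mydev_init(void)
	{
		/* register for power management notifications */
		mydev_pm = pm_register(PM_UNKNOWN_DEV, 0, mydev_pm_callback);
		return 0;
	}

	static void __exit mydev_exit(void)
	{
		/* pm_unregister() copes with a NULL handle from a failed register */
		pm_unregister(mydev_pm);
	}

	module_init(mydev_init);
	module_exit(mydev_exit);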