Diffstat (limited to 'kernel')
-rw-r--r--  kernel/context.c |  2
-rw-r--r--  kernel/fork.c    |  8
-rw-r--r--  kernel/ksyms.c   |  2
-rw-r--r--  kernel/sched.c   | 48
4 files changed, 20 insertions(+), 40 deletions(-)
diff --git a/kernel/context.c b/kernel/context.c
index 864a70131..6bfaaca92 100644
--- a/kernel/context.c
+++ b/kernel/context.c
@@ -148,7 +148,7 @@ void flush_scheduled_tasks(void)
int start_context_thread(void)
{
- kernel_thread(context_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
+ kernel_thread(context_thread, NULL, CLONE_FS | CLONE_FILES);
return 0;
}
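The kernel/context.c change drops CLONE_SIGHAND from the flags keventd's context thread is started with, so the kernel thread no longer shares a signal-handler table with its parent and keeps its own signal state. A minimal user-space sketch of what CLONE_SIGHAND controls, assuming Linux with glibc's clone(3) wrapper (the names below are illustrative, not part of this patch):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

static int child_fn(void *arg)
{
	/* With CLONE_SIGHAND this changes the handler table the parent
	 * also uses; without the flag only the child's private copy
	 * would change. */
	signal(SIGUSR1, SIG_IGN);
	return 0;
}

int main(void)
{
	const size_t sz = 64 * 1024;
	char *stack = malloc(sz);

	/* Current kernels require CLONE_VM alongside CLONE_SIGHAND. */
	pid_t pid = clone(child_fn, stack + sz,
			  CLONE_VM | CLONE_SIGHAND | SIGCHLD, NULL);
	if (pid < 0) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);

	struct sigaction sa;
	sigaction(SIGUSR1, NULL, &sa);
	printf("SIGUSR1 after child ran: %s\n",
	       sa.sa_handler == SIG_IGN ? "shared table (ignored)"
					: "private copy");
	free(stack);
	return 0;
}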
diff --git a/kernel/fork.c b/kernel/fork.c
index 99c1f2317..e578a9644 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -134,7 +134,6 @@ static inline int dup_mmap(struct mm_struct * mm)
mm->mmap_cache = NULL;
mm->map_count = 0;
mm->cpu_vm_mask = 0;
- mm->swap_cnt = 0;
mm->swap_address = 0;
pprev = &mm->mmap;
for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
@@ -193,6 +192,7 @@ fail_nomem:
}
spinlock_t mmlist_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
+int mmlist_nr;
#define allocate_mm() (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
@@ -246,6 +246,7 @@ void mmput(struct mm_struct *mm)
{
if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
list_del(&mm->mmlist);
+ mmlist_nr--;
spin_unlock(&mmlist_lock);
exit_mmap(mm);
mmdrop(mm);
@@ -326,6 +327,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
*/
spin_lock(&mmlist_lock);
list_add(&mm->mmlist, &oldmm->mmlist);
+ mmlist_nr++;
spin_unlock(&mmlist_lock);
if (retval)
@@ -445,7 +447,7 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
if (size > __FD_SETSIZE) {
newf->max_fdset = 0;
write_lock(&newf->file_lock);
- error = expand_fdset(newf, size);
+ error = expand_fdset(newf, size-1);
write_unlock(&newf->file_lock);
if (error)
goto out_release;
@@ -464,7 +466,7 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
read_unlock(&oldf->file_lock);
newf->max_fds = 0;
write_lock(&newf->file_lock);
- error = expand_fd_array(newf, open_files);
+ error = expand_fd_array(newf, open_files-1);
write_unlock(&newf->file_lock);
if (error)
goto out_release;
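The kernel/fork.c hunks drop the per-mm swap_cnt field and introduce mmlist_nr, a count of the mm_structs on the shared mmlist. The counter is only ever updated under mmlist_lock, right next to the list_add()/list_del() it mirrors, so the count and the list can never disagree. (The expand_fdset()/expand_fd_array() calls now pass size-1 and open_files-1, which reads as those helpers taking the highest fd index they must make room for rather than an element count.) A stand-alone sketch of the counted-list pattern, with a pthread mutex standing in for the kernel spinlock (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

/* A counted, lock-protected intrusive list: the counter is touched
 * only while the lock that protects the list is held. */
struct node {
	struct node *next, *prev;
};

static struct node head = { &head, &head };
static int list_nr;			/* analogue of mmlist_nr */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void list_add_counted(struct node *n)
{
	pthread_mutex_lock(&list_lock);
	n->next = head.next;
	n->prev = &head;
	head.next->prev = n;
	head.next = n;
	list_nr++;
	pthread_mutex_unlock(&list_lock);
}

static void list_del_counted(struct node *n)
{
	pthread_mutex_lock(&list_lock);
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_nr--;
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct node a, b;
	list_add_counted(&a);
	list_add_counted(&b);
	list_del_counted(&a);
	printf("nodes on list: %d\n", list_nr);	/* prints 1 */
	return 0;
}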
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 8afe07cca..f36261500 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -159,6 +159,7 @@ EXPORT_SYMBOL(d_alloc);
EXPORT_SYMBOL(d_lookup);
EXPORT_SYMBOL(__d_path);
EXPORT_SYMBOL(mark_buffer_dirty);
+EXPORT_SYMBOL(set_buffer_async_io); /* for reiserfs_writepage */
EXPORT_SYMBOL(__mark_buffer_dirty);
EXPORT_SYMBOL(__mark_inode_dirty);
EXPORT_SYMBOL(get_empty_filp);
@@ -394,6 +395,7 @@ EXPORT_SYMBOL(unmap_kiobuf);
EXPORT_SYMBOL(lock_kiovec);
EXPORT_SYMBOL(unlock_kiovec);
EXPORT_SYMBOL(brw_kiovec);
+EXPORT_SYMBOL(kiobuf_wait_for_io);
/* dma handling */
EXPORT_SYMBOL(request_dma);
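The kernel/ksyms.c hunks export two more symbols, set_buffer_async_io() (for reiserfs_writepage, per the in-line comment) and kiobuf_wait_for_io(), so code built as a module can link against them. EXPORT_SYMBOL() is what makes a kernel symbol visible to the module loader; unexported symbols cannot be resolved at module load time. A minimal sketch of the pattern (my_helper is a hypothetical function, not part of this patch):

#include <linux/module.h>

/* Any function the kernel wants loadable modules to call must be
 * exported; otherwise the symbol stays invisible to insmod. */
int my_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL(my_helper);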
diff --git a/kernel/sched.c b/kernel/sched.c
index bc2dcfa70..ec1f463d0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -690,19 +690,15 @@ scheduling_in_interrupt:
}
static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
- unsigned int wq_mode, const int sync)
+ int nr_exclusive, const int sync)
{
struct list_head *tmp, *head;
- struct task_struct *p, *best_exclusive;
+ struct task_struct *p;
unsigned long flags;
- int best_cpu, irq;
if (!q)
goto out;
- best_cpu = smp_processor_id();
- irq = in_interrupt();
- best_exclusive = NULL;
wq_write_lock_irqsave(&q->lock, flags);
#if WAITQUEUE_DEBUG
@@ -730,47 +726,27 @@ static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
#if WAITQUEUE_DEBUG
curr->__waker = (long)__builtin_return_address(0);
#endif
- /*
- * If waking up from an interrupt context then
- * prefer processes which are affine to this
- * CPU.
- */
- if (irq && (curr->flags & wq_mode & WQ_FLAG_EXCLUSIVE)) {
- if (!best_exclusive)
- best_exclusive = p;
- if (p->processor == best_cpu) {
- best_exclusive = p;
- break;
- }
- } else {
- if (sync)
- wake_up_process_synchronous(p);
- else
- wake_up_process(p);
- if (curr->flags & wq_mode & WQ_FLAG_EXCLUSIVE)
- break;
- }
+ if (sync)
+ wake_up_process_synchronous(p);
+ else
+ wake_up_process(p);
+ if ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
+ break;
}
}
- if (best_exclusive) {
- if (sync)
- wake_up_process_synchronous(best_exclusive);
- else
- wake_up_process(best_exclusive);
- }
wq_write_unlock_irqrestore(&q->lock, flags);
out:
return;
}
-void __wake_up(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode)
+void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr)
{
- __wake_up_common(q, mode, wq_mode, 0);
+ __wake_up_common(q, mode, nr, 0);
}
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode)
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)
{
- __wake_up_common(q, mode, wq_mode, 1);
+ __wake_up_common(q, mode, nr, 1);
}
#define SLEEP_ON_VAR \
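The kernel/sched.c rewrite drops the interrupt-context heuristic that hunted for a "best" exclusive waiter affine to the current CPU, and replaces the wq_mode mask argument with a plain nr_exclusive count: __wake_up_common() now wakes every non-exclusive waiter but at most nr_exclusive exclusive ones, stopping once the counter reaches zero. Passing 1 gives the classic wake-one behaviour, and callers can pass a larger count to release exactly as many exclusive sleepers as there are resources. A stand-alone sketch of that loop over a plain linked list (struct waiter and these function names are illustrative, not the kernel's):

#include <stdio.h>

struct waiter {
	int exclusive;			/* analogue of WQ_FLAG_EXCLUSIVE */
	const char *name;
	struct waiter *next;
};

static void wake(struct waiter *w)	/* stand-in for wake_up_process() */
{
	printf("woke %s\n", w->name);
}

/* Wake every non-exclusive waiter; wake at most nr_exclusive
 * exclusive ones, then stop. */
static void wake_up_common(struct waiter *q, int nr_exclusive)
{
	for (struct waiter *w = q; w; w = w->next) {
		wake(w);
		if (w->exclusive && !--nr_exclusive)
			break;
	}
}

int main(void)
{
	struct waiter c = { 1, "c", NULL };
	struct waiter b = { 1, "b", &c };
	struct waiter a = { 0, "a", &b };
	wake_up_common(&a, 1);	/* wakes a (non-exclusive) and b, skips c */
	return 0;
}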