author    Ralf Baechle <ralf@linux-mips.org>  2000-10-05 01:18:40 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-10-05 01:18:40 +0000
commit    012bb3e61e5eced6c610f9e036372bf0c8def2d1 (patch)
tree      87efc733f9b164e8c85c0336f92c8fb7eff6d183 /kernel
parent    625a1589d3d6464b5d90b8a0918789e3afffd220 (diff)
Merge with Linux 2.4.0-test9. Please check DECstation, I had a number
of rejects to fix up while integrating Linus' patches. I also found that this kernel will only boot SMP on Origin; the UP kernel freezes soon after bootup with SCSI timeout messages. I'm committing this anyway since I found that the last CVS versions had the same problem.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c    |  23
-rw-r--r--  kernel/fork.c    |  28
-rw-r--r--  kernel/info.c    |  36
-rw-r--r--  kernel/kmod.c    |  39
-rw-r--r--  kernel/ksyms.c   |  18
-rw-r--r--  kernel/ptrace.c  |   8
-rw-r--r--  kernel/sched.c   | 115
-rw-r--r--  kernel/signal.c  | 574
-rw-r--r--  kernel/softirq.c |  10
-rw-r--r--  kernel/sys.c     |   3
-rw-r--r--  kernel/sysctl.c  |   8
-rw-r--r--  kernel/timer.c   |   4
12 files changed, 443 insertions, 423 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 6eb4d568a..3f60ec29d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -229,6 +229,7 @@ void exit_files(struct task_struct *tsk)
{
__exit_files(tsk);
}
+
static inline void __put_fs_struct(struct fs_struct *fs)
{
/* No need to hold fs->lock if we are killing it */
@@ -267,26 +268,6 @@ void exit_fs(struct task_struct *tsk)
__exit_fs(tsk);
}
-static inline void __exit_sighand(struct task_struct *tsk)
-{
- struct signal_struct * sig = tsk->sig;
-
- if (sig) {
- spin_lock_irq(&tsk->sigmask_lock);
- tsk->sig = NULL;
- spin_unlock_irq(&tsk->sigmask_lock);
- if (atomic_dec_and_test(&sig->count))
- kmem_cache_free(sigact_cachep, sig);
- }
-
- flush_signals(tsk);
-}
-
-void exit_sighand(struct task_struct *tsk)
-{
- __exit_sighand(tsk);
-}
-
/*
* We can use these to temporarily drop into
* "lazy TLB" mode and back.
@@ -461,7 +442,7 @@ fake_volatile:
__exit_mm(tsk);
__exit_files(tsk);
__exit_fs(tsk);
- __exit_sighand(tsk);
+ exit_sighand(tsk);
exit_thread();
tsk->state = TASK_ZOMBIE;
tsk->exit_code = code;
diff --git a/kernel/fork.c b/kernel/fork.c
index 64dd0f995..b93b0b0e4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -500,15 +500,18 @@ out_release:
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
+ struct signal_struct *sig;
+
if (clone_flags & CLONE_SIGHAND) {
atomic_inc(&current->sig->count);
return 0;
}
- tsk->sig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
- if (!tsk->sig)
+ sig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
+ tsk->sig = sig;
+ if (!sig)
return -1;
- spin_lock_init(&tsk->sig->siglock);
- atomic_set(&tsk->sig->count, 1);
+ spin_lock_init(&sig->siglock);
+ atomic_set(&sig->count, 1);
memcpy(tsk->sig->action, current->sig->action, sizeof(tsk->sig->action));
return 0;
}
@@ -528,10 +531,15 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
/*
* Ok, this is the main fork-routine. It copies the system process
- * information (task[nr]) and sets up the necessary registers. It
- * also copies the data segment in its entirety.
+ * information (task[nr]) and sets up the necessary registers. It also
+ * copies the data segment in its entirety. The "stack_start" and
+ * "stack_top" arguments are simply passed along to the platform
+ * specific copy_thread() routine. Most platforms ignore stack_top.
+ * For an example that's using stack_top, see
+ * arch/ia64/kernel/process.c.
*/
-int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
+int do_fork(unsigned long clone_flags, unsigned long stack_start,
+ struct pt_regs *regs, unsigned long stack_top)
{
int retval = -ENOMEM;
struct task_struct *p;
@@ -591,9 +599,7 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
spin_lock_init(&p->alloc_lock);
p->sigpending = 0;
- sigemptyset(&p->signal);
- p->sigqueue = NULL;
- p->sigqueue_tail = &p->sigqueue;
+ init_sigpending(&p->pending);
p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
@@ -628,7 +634,7 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
goto bad_fork_cleanup_fs;
if (copy_mm(clone_flags, p))
goto bad_fork_cleanup_sighand;
- retval = copy_thread(0, clone_flags, usp, p, regs);
+ retval = copy_thread(0, clone_flags, stack_start, stack_top, p, regs);
if (retval)
goto bad_fork_cleanup_sighand;
p->semundo = NULL;
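
The comment above spells out the new do_fork() contract: stack_start and stack_top are simply forwarded to the architecture's copy_thread(), and most platforms pass 0 for stack_top. As a sketch only (i386-flavoured register names, not part of this patch), an architecture's fork/clone entry points would call the widened interface like this:

/* illustrative i386-style callers of the new four-argument do_fork() */
asmlinkage int sys_fork(struct pt_regs regs)
{
        /* child inherits the parent's user stack pointer; stack_top unused */
        return do_fork(SIGCHLD, regs.esp, &regs, 0);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
        unsigned long clone_flags = regs.ebx;                 /* flags from the caller  */
        unsigned long newsp = regs.ecx ? regs.ecx : regs.esp; /* optional new stack     */

        return do_fork(clone_flags, newsp, &regs, 0);
}
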
diff --git a/kernel/info.c b/kernel/info.c
index 3ee347444..d7abf6713 100644
--- a/kernel/info.c
+++ b/kernel/info.c
@@ -32,6 +32,42 @@ asmlinkage long sys_sysinfo(struct sysinfo *info)
si_meminfo(&val);
si_swapinfo(&val);
+ {
+ /* If the sum of all the available memory (i.e. ram + swap +
+ * highmem) is less then can be stored in a 32 bit unsigned long
+ * then we can be binary compatible with 2.2.x kernels. If not,
+ * well, who cares since in that case 2.2.x was broken anyways...
+ *
+ * -Erik Andersen <andersee@debian.org> */
+
+ unsigned long mem_total = val.totalram + val.totalswap;
+ if ( !(mem_total < val.totalram || mem_total < val.totalswap)) {
+ unsigned long mem_total2 = mem_total + val.totalhigh;
+ if (!(mem_total2 < mem_total || mem_total2 < val.totalhigh))
+ {
+ /* If mem_total did not overflow. Divide all memory values by
+ * mem_unit and set mem_unit=1. This leaves things compatible with
+ * 2.2.x, and also retains compatibility with earlier 2.4.x
+ * kernels... */
+
+ int bitcount = 0;
+ while (val.mem_unit > 1)
+ {
+ bitcount++;
+ val.mem_unit >>= 1;
+ }
+ val.totalram <<= bitcount;
+ val.freeram <<= bitcount;
+ val.sharedram <<= bitcount;
+ val.bufferram <<= bitcount;
+ val.totalswap <<= bitcount;
+ val.freeswap <<= bitcount;
+ val.totalhigh <<= bitcount;
+ val.freehigh <<= bitcount;
+ }
+ }
+ }
+
if (copy_to_user(info, &val, sizeof(struct sysinfo)))
return -EFAULT;
return 0;
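
The 2.2.x-compatibility conversion added above is plain unsigned arithmetic, so it can be exercised outside the kernel. A minimal user-space model (field names mirror struct sysinfo, the numbers are made up) of the overflow test and the folding of mem_unit back into the counts:

#include <stdio.h>

struct toy_sysinfo {                    /* stand-in for struct sysinfo */
        unsigned long totalram, totalswap, totalhigh;
        unsigned int mem_unit;          /* bytes per unit, a power of two */
};

static void fold_mem_unit(struct toy_sysinfo *val)
{
        unsigned long mem_total = val->totalram + val->totalswap;
        unsigned long mem_total2;
        int bitcount = 0;

        /* unsigned overflow shows up as a sum smaller than one of its operands */
        if (mem_total < val->totalram || mem_total < val->totalswap)
                return;
        mem_total2 = mem_total + val->totalhigh;
        if (mem_total2 < mem_total || mem_total2 < val->totalhigh)
                return;

        /* scale the counts by mem_unit so that mem_unit ends up as 1 */
        while (val->mem_unit > 1) {
                bitcount++;
                val->mem_unit >>= 1;
        }
        val->totalram  <<= bitcount;
        val->totalswap <<= bitcount;
        val->totalhigh <<= bitcount;
}

int main(void)
{
        struct toy_sysinfo val = { 16384, 32768, 0, 4096 }; /* counts in 4 KiB units */

        fold_mem_unit(&val);
        printf("totalram=%lu totalswap=%lu mem_unit=%u\n",
               val.totalram, val.totalswap, val.mem_unit);
        return 0;
}
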
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 4941840a7..3fff3ed3d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -247,5 +247,44 @@ int request_module(const char * module_name)
*/
char hotplug_path[256] = "/sbin/hotplug";
+
+static int exec_helper (void *arg)
+{
+ void **params = (void **) arg;
+ char *path = (char *) params [0];
+ char **argv = (char **) params [1];
+ char **envp = (char **) params [2];
+ return exec_usermodehelper (path, argv, envp);
+}
+
+
+int call_usermodehelper (char *path, char **argv, char **envp)
+{
+ void *params [3] = { path, argv, envp };
+ int pid, pid2, retval;
+ mm_segment_t fs;
+
+ if ( ! current->fs->root ) {
+ printk(KERN_ERR "call_usermodehelper[%s]: no root fs\n",
+ path);
+ return -EPERM;
+ }
+ if ((pid = kernel_thread (exec_helper, (void *) params, 0)) < 0) {
+ printk(KERN_ERR "failed fork %s, errno = %d", argv [0], -pid);
+ return -1;
+ }
+
+ fs = get_fs ();
+ set_fs (KERNEL_DS);
+ pid2 = waitpid (pid, &retval, __WCLONE);
+ set_fs (fs);
+
+ if (pid2 != pid) {
+ printk(KERN_ERR "waitpid(%d) failed, %d\n", pid, pid2);
+ return -1;
+ }
+ return retval;
+}
+
#endif
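
call_usermodehelper() forks a kernel thread, execs the helper through exec_usermodehelper(), and then blocks in waitpid() for the exit status, so it may only be used from process context. A hedged usage sketch (the subsystem argument and the environment strings are illustrative, not part of the interface):

/* illustrative caller, e.g. a driver notifying /sbin/hotplug of an event */
static int notify_hotplug(char *subsystem)
{
        char *argv[3], *envp[3];

        argv[0] = hotplug_path;          /* "/sbin/hotplug" unless overridden */
        argv[1] = subsystem;             /* e.g. "usb"                        */
        argv[2] = NULL;

        envp[0] = "HOME=/";              /* minimal, illustrative environment */
        envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
        envp[2] = NULL;

        return call_usermodehelper(argv[0], argv, envp);
}
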
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index 660d492f6..2a8882dbc 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -55,9 +55,6 @@
extern int console_loglevel;
extern void set_device_ro(kdev_t dev,int flag);
-#if !defined(CONFIG_NFSD) && defined(CONFIG_NFSD_MODULE)
-extern long (*do_nfsservctl)(int, void *, void *);
-#endif
extern void *sys_call_table;
@@ -79,6 +76,7 @@ EXPORT_SYMBOL(request_module);
EXPORT_SYMBOL(exec_usermodehelper);
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(hotplug_path);
+EXPORT_SYMBOL(call_usermodehelper);
#endif
#endif
@@ -206,6 +204,7 @@ EXPORT_SYMBOL(block_prepare_write);
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(cont_prepare_write);
EXPORT_SYMBOL(generic_commit_write);
+EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_file_read);
EXPORT_SYMBOL(do_generic_file_read);
@@ -216,6 +215,9 @@ EXPORT_SYMBOL(generic_buffer_fdatasync);
EXPORT_SYMBOL(page_hash_bits);
EXPORT_SYMBOL(page_hash_table);
EXPORT_SYMBOL(file_lock_list);
+EXPORT_SYMBOL(file_lock_sem);
+EXPORT_SYMBOL(locks_init_lock);
+EXPORT_SYMBOL(locks_copy_lock);
EXPORT_SYMBOL(posix_lock_file);
EXPORT_SYMBOL(posix_test_lock);
EXPORT_SYMBOL(posix_block_lock);
@@ -255,6 +257,10 @@ EXPORT_SYMBOL(page_follow_link);
EXPORT_SYMBOL(page_symlink_inode_operations);
EXPORT_SYMBOL(block_symlink);
EXPORT_SYMBOL(vfs_readdir);
+EXPORT_SYMBOL(__get_lease);
+EXPORT_SYMBOL(lease_get_mtime);
+EXPORT_SYMBOL(lock_may_read);
+EXPORT_SYMBOL(lock_may_write);
EXPORT_SYMBOL(dcache_readdir);
/* for stackable file systems (lofs, wrapfs, cryptfs, etc.) */
@@ -265,10 +271,6 @@ EXPORT_SYMBOL(filemap_swapout);
EXPORT_SYMBOL(filemap_sync);
EXPORT_SYMBOL(lock_page);
-#if !defined(CONFIG_NFSD) && defined(CONFIG_NFSD_MODULE)
-EXPORT_SYMBOL(do_nfsservctl);
-#endif
-
/* device registration */
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);
@@ -367,8 +369,6 @@ EXPORT_SYMBOL(remove_wait_queue);
#if !defined(CONFIG_ARCH_S390)
EXPORT_SYMBOL(probe_irq_on);
EXPORT_SYMBOL(probe_irq_off);
-EXPORT_SYMBOL(autoirq_setup);
-EXPORT_SYMBOL(autoirq_report);
#endif
#ifdef CONFIG_SMP
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index a749bb501..87dea8254 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -44,8 +44,12 @@ repeat:
if (write && (!pte_write(*pgtable) || !pte_dirty(*pgtable)))
goto fault_in_page;
page = pte_page(*pgtable);
- if ((!VALID_PAGE(page)) || PageReserved(page))
- return 0;
+
+ /* ZERO_PAGE is special: reads from it are ok even though it's marked reserved */
+ if (page != ZERO_PAGE(addr) || write) {
+ if ((!VALID_PAGE(page)) || PageReserved(page))
+ return 0;
+ }
flush_cache_page(vma, addr);
if (write) {
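
The ptrace change only widens the access check: the zero page is marked reserved, but reading it is harmless, so reads of ZERO_PAGE no longer fail. A small user-space model of the predicate (the flags stand in for VALID_PAGE(), PageReserved() and the ZERO_PAGE comparison):

#include <stdio.h>

static int access_allowed(int valid, int reserved, int is_zero_page, int write)
{
        /* new behaviour: a read of the zero page skips the reserved check */
        if (is_zero_page && !write)
                return 1;
        return valid && !reserved;       /* everything else: as before */
}

int main(void)
{
        /* the old code rejected this case; the patched code permits it */
        printf("read of ZERO_PAGE:  %s\n",
               access_allowed(1, 1, 1, 0) ? "allowed" : "rejected");
        /* writes never get this far in the kernel (the read-only pte makes
           them fault in a private page first); here they simply fail */
        printf("write to ZERO_PAGE: %s\n",
               access_allowed(1, 1, 1, 1) ? "allowed" : "rejected");
        return 0;
}
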
diff --git a/kernel/sched.c b/kernel/sched.c
index 8b47b558a..361b72491 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -141,61 +141,54 @@ static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struc
int weight;
/*
- * Realtime process, select the first one on the
- * runqueue (taking priorities within processes
- * into account).
+ * select the current process after every other
+ * runnable process, but before the idle thread.
+ * Also, dont trigger a counter recalculation.
*/
- if (p->policy != SCHED_OTHER) {
- weight = 1000 + p->rt_priority;
+ weight = -1;
+ if (p->policy & SCHED_YIELD)
goto out;
- }
/*
- * Give the process a first-approximation goodness value
- * according to the number of clock-ticks it has left.
- *
- * Don't do any other calculations if the time slice is
- * over..
+ * Non-RT process - normal case first.
*/
- weight = p->counter;
- if (!weight)
- goto out;
+ if (p->policy == SCHED_OTHER) {
+ /*
+ * Give the process a first-approximation goodness value
+ * according to the number of clock-ticks it has left.
+ *
+ * Don't do any other calculations if the time slice is
+ * over..
+ */
+ weight = p->counter;
+ if (!weight)
+ goto out;
#ifdef CONFIG_SMP
- /* Give a largish advantage to the same processor... */
- /* (this is equivalent to penalizing other processors) */
- if (p->processor == this_cpu)
- weight += PROC_CHANGE_PENALTY;
+ /* Give a largish advantage to the same processor... */
+ /* (this is equivalent to penalizing other processors) */
+ if (p->processor == this_cpu)
+ weight += PROC_CHANGE_PENALTY;
#endif
- /* .. and a slight advantage to the current MM */
- if (p->mm == this_mm || !p->mm)
- weight += 1;
- weight += 20 - p->nice;
+ /* .. and a slight advantage to the current MM */
+ if (p->mm == this_mm || !p->mm)
+ weight += 1;
+ weight += 20 - p->nice;
+ goto out;
+ }
+ /*
+ * Realtime process, select the first one on the
+ * runqueue (taking priorities within processes
+ * into account).
+ */
+ weight = 1000 + p->rt_priority;
out:
return weight;
}
/*
- * subtle. We want to discard a yielded process only if it's being
- * considered for a reschedule. Wakeup-time 'queries' of the scheduling
- * state do not count. Another optimization we do: sched_yield()-ed
- * processes are runnable (and thus will be considered for scheduling)
- * right when they are calling schedule(). So the only place we need
- * to care about SCHED_YIELD is when we calculate the previous process'
- * goodness ...
- */
-static inline int prev_goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
-{
- if (p->policy & SCHED_YIELD) {
- p->policy &= ~SCHED_YIELD;
- return 0;
- }
- return goodness(p, this_cpu, this_mm);
-}
-
-/*
* the 'goodness value' of replacing a process on a given CPU.
* positive value means 'replace', zero or negative means 'dont'.
*/
@@ -213,7 +206,9 @@ static inline int preemption_goodness(struct task_struct * prev, struct task_str
* This function must be inline as anything that saves and restores
* flags has to do so within the same register window on sparc (Anton)
*/
-static inline void reschedule_idle(struct task_struct * p, unsigned long flags)
+static FASTCALL(void reschedule_idle(struct task_struct * p));
+
+static void reschedule_idle(struct task_struct * p)
{
#ifdef CONFIG_SMP
int this_cpu = smp_processor_id();
@@ -284,7 +279,6 @@ static inline void reschedule_idle(struct task_struct * p, unsigned long flags)
goto preempt_now;
}
- spin_unlock_irqrestore(&runqueue_lock, flags);
return;
send_now_idle:
@@ -296,12 +290,10 @@ send_now_idle:
if ((tsk->processor != current->processor) && !tsk->need_resched)
smp_send_reschedule(tsk->processor);
tsk->need_resched = 1;
- spin_unlock_irqrestore(&runqueue_lock, flags);
return;
preempt_now:
tsk->need_resched = 1;
- spin_unlock_irqrestore(&runqueue_lock, flags);
/*
* the APIC stuff can go outside of the lock because
* it uses no task information, only CPU#.
@@ -316,7 +308,6 @@ preempt_now:
tsk = cpu_curr(this_cpu);
if (preemption_goodness(tsk, p, this_cpu) > 1)
tsk->need_resched = 1;
- spin_unlock_irqrestore(&runqueue_lock, flags);
#endif
}
@@ -365,9 +356,7 @@ inline void wake_up_process(struct task_struct * p)
if (task_on_runqueue(p))
goto out;
add_to_runqueue(p);
- reschedule_idle(p, flags); // spin_unlocks runqueue
-
- return;
+ reschedule_idle(p);
out:
spin_unlock_irqrestore(&runqueue_lock, flags);
}
@@ -455,6 +444,7 @@ signed long schedule_timeout(signed long timeout)
static inline void __schedule_tail(struct task_struct *prev)
{
#ifdef CONFIG_SMP
+ int yield;
unsigned long flags;
/*
@@ -466,6 +456,8 @@ static inline void __schedule_tail(struct task_struct *prev)
* cache.
*/
spin_lock_irqsave(&runqueue_lock, flags);
+ yield = prev->policy & SCHED_YIELD;
+ prev->policy &= ~SCHED_YIELD;
prev->has_cpu = 0;
if (prev->state == TASK_RUNNING)
goto running_again;
@@ -480,10 +472,11 @@ out_unlock:
* current process as well.)
*/
running_again:
- if (prev == idle_task(smp_processor_id()))
- goto out_unlock;
- reschedule_idle(prev, flags); // spin_unlocks runqueue
- return;
+ if ((prev != idle_task(smp_processor_id())) && !yield)
+ reschedule_idle(prev);
+ goto out_unlock;
+#else
+ prev->policy &= ~SCHED_YIELD;
#endif /* CONFIG_SMP */
}
@@ -656,6 +649,9 @@ still_running_back:
same_process:
reacquire_kernel_lock(current);
+ if (current->need_resched)
+ goto tq_scheduler_back;
+
return;
recalculate:
@@ -671,7 +667,7 @@ recalculate:
goto repeat_schedule;
still_running:
- c = prev_goodness(prev, this_cpu, prev->active_mm);
+ c = goodness(prev, this_cpu, prev->active_mm);
next = prev;
goto still_running_back;
@@ -1032,12 +1028,13 @@ out_unlock:
asmlinkage long sys_sched_yield(void)
{
- spin_lock_irq(&runqueue_lock);
+ /*
+ * This process can only be rescheduled by us,
+ * so this is safe without any locking.
+ */
if (current->policy == SCHED_OTHER)
current->policy |= SCHED_YIELD;
current->need_resched = 1;
- move_last_runqueue(current);
- spin_unlock_irq(&runqueue_lock);
return 0;
}
@@ -1142,13 +1139,13 @@ static void show_task(struct task_struct * p)
printk("\n");
{
- struct signal_queue *q;
+ struct sigqueue *q;
char s[sizeof(sigset_t)*2+1], b[sizeof(sigset_t)*2+1];
- render_sigset_t(&p->signal, s);
+ render_sigset_t(&p->pending.signal, s);
render_sigset_t(&p->blocked, b);
printk(" sig: %d %s %s :", signal_pending(p), s, b);
- for (q = p->sigqueue; q ; q = q->next)
+ for (q = p->pending.head; q ; q = q->next)
printk(" %d", q->info.si_signo);
printk(" X\n");
}
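
The goodness() rewrite above keeps the same weights but reorders the tests: a SCHED_YIELD task now scores -1, so it sorts below every other runnable task yet is still chosen ahead of the idle thread; an ordinary SCHED_OTHER task scores its remaining time slice plus CPU-affinity, mm and nice bonuses; and a real-time task always scores 1000 + rt_priority. A simplified user-space model (the SCHED_YIELD and PROC_CHANGE_PENALTY values are assumptions for illustration):

#include <stdio.h>

#define SCHED_OTHER          0
#define SCHED_YIELD          0x10   /* assumed value; only its presence matters here */
#define PROC_CHANGE_PENALTY  15     /* illustrative; the real value is per-arch      */

struct toy_task {
        int policy, counter, nice, rt_priority, processor, mm_matches;
};

static int goodness(const struct toy_task *p, int this_cpu)
{
        int weight = -1;
        if (p->policy & SCHED_YIELD)
                return weight;                          /* run after everyone else */

        if (p->policy == SCHED_OTHER) {
                weight = p->counter;                    /* remaining slice, in ticks */
                if (!weight)
                        return 0;                       /* slice gone: forces recalculation */
                if (p->processor == this_cpu)
                        weight += PROC_CHANGE_PENALTY;  /* cache-affinity bonus */
                if (p->mm_matches)
                        weight += 1;                    /* cheap mm switch */
                return weight + 20 - p->nice;
        }
        return 1000 + p->rt_priority;                   /* real-time always outranks */
}

int main(void)
{
        struct toy_task normal = { SCHED_OTHER, 6, 0, 0, 0, 1 };
        struct toy_task rt = { 2 /* e.g. SCHED_RR */, 0, 0, 50, 0, 0 };

        printf("normal=%d rt=%d\n", goodness(&normal, 0), goodness(&rt, 0));
        return 0;
}
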
diff --git a/kernel/signal.c b/kernel/signal.c
index 7b256a954..4e6a4d909 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -29,32 +29,32 @@
#define SIG_SLAB_DEBUG 0
#endif
-static kmem_cache_t *signal_queue_cachep;
+static kmem_cache_t *sigqueue_cachep;
atomic_t nr_queued_signals;
int max_queued_signals = 1024;
void __init signals_init(void)
{
- signal_queue_cachep =
- kmem_cache_create("signal_queue",
- sizeof(struct signal_queue),
- __alignof__(struct signal_queue),
+ sigqueue_cachep =
+ kmem_cache_create("sigqueue",
+ sizeof(struct sigqueue),
+ __alignof__(struct sigqueue),
SIG_SLAB_DEBUG, NULL, NULL);
- if (!signal_queue_cachep)
- panic("signals_init(): cannot create signal_queue SLAB cache");
+ if (!sigqueue_cachep)
+ panic("signals_init(): cannot create sigqueue SLAB cache");
}
/* Given the mask, find the first available signal that should be serviced. */
static int
-next_signal(sigset_t *signal, sigset_t *mask)
+next_signal(struct task_struct *tsk, sigset_t *mask)
{
unsigned long i, *s, *m, x;
int sig = 0;
- s = signal->sig;
+ s = tsk->pending.signal.sig;
m = mask->sig;
switch (_NSIG_WORDS) {
default:
@@ -82,6 +82,23 @@ next_signal(sigset_t *signal, sigset_t *mask)
return sig;
}
+static void flush_sigqueue(struct sigpending *queue)
+{
+ struct sigqueue *q, *n;
+
+ sigemptyset(&queue->signal);
+ q = queue->head;
+ queue->head = NULL;
+ queue->tail = &queue->head;
+
+ while (q) {
+ n = q->next;
+ kmem_cache_free(sigqueue_cachep, q);
+ atomic_dec(&nr_queued_signals);
+ q = n;
+ }
+}
+
/*
* Flush all pending signals for a task.
*/
@@ -89,20 +106,23 @@ next_signal(sigset_t *signal, sigset_t *mask)
void
flush_signals(struct task_struct *t)
{
- struct signal_queue *q, *n;
-
t->sigpending = 0;
- sigemptyset(&t->signal);
- q = t->sigqueue;
- t->sigqueue = NULL;
- t->sigqueue_tail = &t->sigqueue;
+ flush_sigqueue(&t->pending);
+}
- while (q) {
- n = q->next;
- kmem_cache_free(signal_queue_cachep, q);
- atomic_dec(&nr_queued_signals);
- q = n;
+void exit_sighand(struct task_struct *tsk)
+{
+ struct signal_struct * sig = tsk->sig;
+
+ spin_lock_irq(&tsk->sigmask_lock);
+ if (sig) {
+ tsk->sig = NULL;
+ if (atomic_dec_and_test(&sig->count))
+ kmem_cache_free(sigact_cachep, sig);
}
+ tsk->sigpending = 0;
+ flush_sigqueue(&tsk->pending);
+ spin_unlock_irq(&tsk->sigmask_lock);
}
/*
@@ -134,9 +154,13 @@ flush_signal_handlers(struct task_struct *t)
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->sigmask_lock, flags);
current->notifier_mask = mask;
current->notifier_data = priv;
current->notifier = notifier;
+ spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
/* Notify the system that blocking has ended. */
@@ -144,9 +168,61 @@ block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
void
unblock_all_signals(void)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->sigmask_lock, flags);
current->notifier = NULL;
current->notifier_data = NULL;
recalc_sigpending(current);
+ spin_unlock_irqrestore(&current->sigmask_lock, flags);
+}
+
+static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+{
+ if (sigismember(&list->signal, sig)) {
+ /* Collect the siginfo appropriate to this signal. */
+ struct sigqueue *q, **pp;
+ pp = &list->head;
+ while ((q = *pp) != NULL) {
+ if (q->info.si_signo == sig)
+ goto found_it;
+ pp = &q->next;
+ }
+
+ /* Ok, it wasn't in the queue. We must have
+ been out of queue space. So zero out the
+ info. */
+ sigdelset(&list->signal, sig);
+ info->si_signo = sig;
+ info->si_errno = 0;
+ info->si_code = 0;
+ info->si_pid = 0;
+ info->si_uid = 0;
+ return 1;
+
+found_it:
+ if ((*pp = q->next) == NULL)
+ list->tail = pp;
+
+ /* Copy the sigqueue information and free the queue entry */
+ copy_siginfo(info, &q->info);
+ kmem_cache_free(sigqueue_cachep,q);
+ atomic_dec(&nr_queued_signals);
+
+ /* Non-RT signals can exist multiple times.. */
+ if (sig >= SIGRTMIN) {
+ while ((q = *pp) != NULL) {
+ if (q->info.si_signo == sig)
+ goto found_another;
+ pp = &q->next;
+ }
+ }
+
+ sigdelset(&list->signal, sig);
+found_another:
+ return 1;
+ }
+ return 0;
}
/*
@@ -166,17 +242,9 @@ printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
signal_pending(current));
#endif
- sig = next_signal(&current->signal, mask);
+ sig = next_signal(current, mask);
if (current->notifier) {
- sigset_t merged;
- int i;
- int altsig;
-
- for (i = 0; i < _NSIG_WORDS; i++)
- merged.sig[i] = mask->sig[i]
- | current->notifier_mask->sig[i];
- altsig = next_signal(&current->signal, &merged);
- if (sig != altsig) {
+ if (sigismember(current->notifier_mask, sig)) {
if (!(current->notifier)(current->notifier_data)) {
current->sigpending = 0;
return 0;
@@ -185,63 +253,13 @@ printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
}
if (sig) {
- int reset = 1;
-
- /* Collect the siginfo appropriate to this signal. */
- struct signal_queue *q, **pp;
- pp = &current->sigqueue;
- q = current->sigqueue;
-
- /* Find the one we're interested in ... */
- for ( ; q ; pp = &q->next, q = q->next)
- if (q->info.si_signo == sig)
- break;
- if (q) {
- if ((*pp = q->next) == NULL)
- current->sigqueue_tail = pp;
- copy_siginfo(info, &q->info);
- kmem_cache_free(signal_queue_cachep,q);
- atomic_dec(&nr_queued_signals);
-
- /* Then see if this signal is still pending.
- (Non rt signals may not be queued twice.)
- */
- if (sig >= SIGRTMIN)
- for (q = *pp; q; q = q->next)
- if (q->info.si_signo == sig) {
- reset = 0;
- break;
- }
-
- } else {
- /* Ok, it wasn't in the queue. We must have
- been out of queue space. So zero out the
- info. */
- info->si_signo = sig;
- info->si_errno = 0;
- info->si_code = 0;
- info->si_pid = 0;
- info->si_uid = 0;
- }
-
- if (reset) {
- sigdelset(&current->signal, sig);
- recalc_sigpending(current);
- }
-
+ if (!collect_signal(sig, &current->pending, info))
+ sig = 0;
+
/* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
we need to xchg out the timer overrun values. */
- } else {
- /* XXX: Once CLONE_PID is in to join those "threads" that are
- part of the same "process", look for signals sent to the
- "process" as well. */
-
- /* Sanity check... */
- if (mask == &current->blocked && signal_pending(current)) {
- printk(KERN_CRIT "SIG: sigpending lied\n");
- current->sigpending = 0;
- }
}
+ recalc_sigpending(current);
#if DEBUG_SIG
printk(" %d -> %d\n", signal_pending(current), sig);
@@ -250,44 +268,42 @@ printk(" %d -> %d\n", signal_pending(current), sig);
return sig;
}
-/*
- * Remove signal sig from queue and from t->signal.
- * Returns 1 if sig was found in t->signal.
- *
- * All callers must be holding t->sigmask_lock.
- */
-static int rm_sig_from_queue(int sig, struct task_struct *t)
+static int rm_from_queue(int sig, struct sigpending *s)
{
- struct signal_queue *q, **pp;
-
- if (sig >= SIGRTMIN) {
- printk(KERN_CRIT "SIG: rm_sig_from_queue() doesn't support rt signals\n");
- return 0;
- }
+ struct sigqueue *q, **pp;
- if (!sigismember(&t->signal, sig))
+ if (!sigismember(&s->signal, sig))
return 0;
- sigdelset(&t->signal, sig);
+ sigdelset(&s->signal, sig);
- pp = &t->sigqueue;
- q = t->sigqueue;
+ pp = &s->head;
- /* Find the one we're interested in ...
- It may appear only once. */
- for ( ; q ; pp = &q->next, q = q->next)
- if (q->info.si_signo == sig)
- break;
- if (q) {
- if ((*pp = q->next) == NULL)
- t->sigqueue_tail = pp;
- kmem_cache_free(signal_queue_cachep,q);
- atomic_dec(&nr_queued_signals);
+ while ((q = *pp) != NULL) {
+ if (q->info.si_signo == sig) {
+ if ((*pp = q->next) == NULL)
+ s->tail = pp;
+ kmem_cache_free(sigqueue_cachep,q);
+ atomic_dec(&nr_queued_signals);
+ continue;
+ }
+ pp = &q->next;
}
return 1;
}
/*
+ * Remove signal sig from t->pending.
+ * Returns 1 if sig was found.
+ *
+ * All callers must be holding t->sigmask_lock.
+ */
+static int rm_sig_from_queue(int sig, struct task_struct *t)
+{
+ return rm_from_queue(sig, &t->pending);
+}
+
+/*
* Bad permissions for sending the signal
*/
int bad_signal(int sig, struct siginfo *info, struct task_struct *t)
@@ -300,6 +316,46 @@ int bad_signal(int sig, struct siginfo *info, struct task_struct *t)
}
/*
+ * Signal type:
+ * < 0 : global action (kill - spread to all non-blocked threads)
+ * = 0 : ignored
+ * > 0 : wake up.
+ */
+static int signal_type(int sig, struct signal_struct *signals)
+{
+ unsigned long handler;
+
+ if (!signals)
+ return 0;
+
+ handler = (unsigned long) signals->action[sig-1].sa.sa_handler;
+ if (handler > 1)
+ return 1;
+
+ /* "Ignore" handler.. Illogical, but that has an implicit handler for SIGCHLD */
+ if (handler == 1)
+ return sig == SIGCHLD;
+
+ /* Default handler. Normally lethal, but.. */
+ switch (sig) {
+
+ /* Ignored */
+ case SIGCONT: case SIGWINCH:
+ case SIGCHLD: case SIGURG:
+ return 0;
+
+ /* Implicit behaviour */
+ case SIGTSTP: case SIGTTIN: case SIGTTOU:
+ return 1;
+
+ /* Implicit actions (kill or do special stuff) */
+ default:
+ return -1;
+ }
+}
+
+
+/*
* Determine whether a signal should be posted or not.
*
* Signals with SIG_IGN can be ignored, except for the
@@ -309,41 +365,18 @@ int bad_signal(int sig, struct siginfo *info, struct task_struct *t)
*/
static int ignored_signal(int sig, struct task_struct *t)
{
- struct signal_struct *signals;
- struct k_sigaction *ka;
-
/* Don't ignore traced or blocked signals */
if ((t->ptrace & PT_PTRACED) || sigismember(&t->blocked, sig))
return 0;
-
- signals = t->sig;
- if (!signals)
- return 1;
-
- ka = &signals->action[sig-1];
- switch ((unsigned long) ka->sa.sa_handler) {
- case (unsigned long) SIG_DFL:
- if (sig == SIGCONT ||
- sig == SIGWINCH ||
- sig == SIGCHLD ||
- sig == SIGURG)
- break;
- return 0;
- case (unsigned long) SIG_IGN:
- if (sig != SIGCHLD)
- break;
- /* fallthrough */
- default:
- return 0;
- }
- return 1;
+ return signal_type(sig, t->sig) == 0;
}
/*
- * Handle TASK_STOPPED.
- * Also, return true for the unblockable signals that we
- * should deliver to all threads..
+ * Handle TASK_STOPPED cases etc implicit behaviour
+ * of certain magical signals.
+ *
+ * SIGKILL gets spread out to every thread.
*/
static void handle_stop_signal(int sig, struct task_struct *t)
{
@@ -353,24 +386,23 @@ static void handle_stop_signal(int sig, struct task_struct *t)
if (t->state == TASK_STOPPED)
wake_up_process(t);
t->exit_code = 0;
- if (rm_sig_from_queue(SIGSTOP, t) || rm_sig_from_queue(SIGTSTP, t) ||
- rm_sig_from_queue(SIGTTOU, t) || rm_sig_from_queue(SIGTTIN, t))
- recalc_sigpending(t);
+ rm_sig_from_queue(SIGSTOP, t);
+ rm_sig_from_queue(SIGTSTP, t);
+ rm_sig_from_queue(SIGTTOU, t);
+ rm_sig_from_queue(SIGTTIN, t);
break;
case SIGSTOP: case SIGTSTP:
case SIGTTIN: case SIGTTOU:
/* If we're stopping again, cancel SIGCONT */
- if (rm_sig_from_queue(SIGCONT, t))
- recalc_sigpending(t);
+ rm_sig_from_queue(SIGCONT, t);
break;
}
- return 0;
}
-static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
+static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
{
- struct signal_queue * q = NULL;
+ struct sigqueue * q = NULL;
/* Real-time signals must be queued if sent by sigqueue, or
some other real-time mechanism. It is implementation
@@ -381,14 +413,14 @@ static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
pass on the info struct. */
if (atomic_read(&nr_queued_signals) < max_queued_signals) {
- q = kmem_cache_alloc(signal_queue_cachep, GFP_ATOMIC);
+ q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
}
if (q) {
atomic_inc(&nr_queued_signals);
q->next = NULL;
- *t->sigqueue_tail = q;
- t->sigqueue_tail = &q->next;
+ *signals->tail = q;
+ signals->tail = &q->next;
switch ((unsigned long) info) {
case 0:
q->info.si_signo = sig;
@@ -417,94 +449,58 @@ static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
return -EAGAIN;
}
- sigaddset(&t->signal, sig);
- if (!sigismember(&t->blocked, sig)) {
- t->sigpending = 1;
-#ifdef CONFIG_SMP
- /*
- * If the task is running on a different CPU
- * force a reschedule on the other CPU - note that
- * the code below is a tad loose and might occasionally
- * kick the wrong CPU if we catch the process in the
- * process of changing - but no harm is done by that
- * other than doing an extra (lightweight) IPI interrupt.
- *
- * note that we rely on the previous spin_lock to
- * lock interrupts for us! No need to set need_resched
- * since signal event passing goes through ->blocked.
- */
- spin_lock(&runqueue_lock);
- if (t->has_cpu && t->processor != smp_processor_id())
- smp_send_reschedule(t->processor);
- spin_unlock(&runqueue_lock);
-#endif /* CONFIG_SMP */
- }
+ sigaddset(&signals->signal, sig);
return 0;
}
-
/*
- * Send a thread-group-wide signal.
- *
- * Rule: SIGSTOP and SIGKILL get delivered to _everybody_.
+ * Tell a process that it has a new active signal..
*
- * Others get delivered to the thread that doesn't have them
- * blocked (just one such thread).
+ * NOTE! we rely on the previous spin_lock to
+ * lock interrupts for us! We can only be called with
+ * "sigmask_lock" held, and the local interrupt must
+ * have been disabled when that got aquired!
*
- * If all threads have it blocked, it gets delievered to the
- * thread group leader.
+ * No need to set need_resched since signal event passing
+ * goes through ->blocked
*/
-static int send_tg_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+static inline void signal_wake_up(struct task_struct *t)
{
- int retval = 0;
- struct task_struct *tsk;
-
- if (sig < 0 || sig > _NSIG)
- return -EINVAL;
+ t->sigpending = 1;
- if (bad_signal(sig, info, p))
- return -EPERM;
-
- if (!sig)
- return 0;
-
- tsk = p;
- do {
- unsigned long flags;
- tsk = next_thread(tsk);
-
- /* Zombie? Ignore */
- if (!tsk->sig)
- continue;
-
- spin_lock_irqsave(&tsk->sigmask_lock, flags);
- handle_stop_signal(sig, tsk);
+ if (t->state & TASK_INTERRUPTIBLE) {
+ wake_up_process(t);
+ return;
+ }
- /* Is the signal ignored by this thread? */
- if (ignored_signal(sig, tsk))
- goto next;
+#ifdef CONFIG_SMP
+ /*
+ * If the task is running on a different CPU
+ * force a reschedule on the other CPU to make
+ * it notice the new signal quickly.
+ *
+ * The code below is a tad loose and might occasionally
+ * kick the wrong CPU if we catch the process in the
+ * process of changing - but no harm is done by that
+ * other than doing an extra (lightweight) IPI interrupt.
+ */
+ spin_lock(&runqueue_lock);
+ if (t->has_cpu && t->processor != smp_processor_id())
+ smp_send_reschedule(t->processor);
+ spin_unlock(&runqueue_lock);
+#endif /* CONFIG_SMP */
+}
- /* Have we already delivered this non-queued signal? */
- if (sig < SIGRTMIN && sigismember(&tsk->signal, sig))
- goto next;
+static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
+{
+ int retval = send_signal(sig, info, &t->pending);
- /* Not blocked? Go, girl, go! */
- if (tsk == p || !sigismember(&tsk->blocked, sig)) {
- retval = deliver_signal(sig, info, tsk);
+ if (!retval && !sigismember(&t->blocked, sig))
+ signal_wake_up(t);
- /* Signals other than SIGKILL and SIGSTOP have "once" semantics */
- if (sig != SIGKILL && sig != SIGSTOP)
- tsk = p;
- }
-next:
- spin_unlock_irqrestore(&tsk->sigmask_lock, flags);
- if ((tsk->state & TASK_INTERRUPTIBLE) && signal_pending(tsk))
- wake_up_process(tsk);
- } while (tsk != p);
return retval;
}
-
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
@@ -543,7 +539,7 @@ printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
/* Support queueing exactly one non-rt signal, so that we
can get more detailed information about the cause of
the signal. */
- if (sig < SIGRTMIN && sigismember(&t->signal, sig))
+ if (sig < SIGRTMIN && sigismember(&t->pending.signal, sig))
goto out;
ret = deliver_signal(sig, info, t);
@@ -654,32 +650,6 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
/*
- * Send a signal to a thread group..
- *
- * If the pid is the thread group ID, we consider this
- * a "thread group" signal. Otherwise it degenerates into
- * a thread-specific signal.
- */
-static int kill_tg_info(int sig, struct siginfo *info, pid_t pid)
-{
- int error;
- struct task_struct *p;
-
- read_lock(&tasklist_lock);
- p = find_task_by_pid(pid);
- error = -ESRCH;
- if (p) {
- /* Is it the leader? Otherwise it degenerates into a per-thread thing */
- if (p->tgid == pid)
- error = send_tg_sig_info(sig, info, p);
- else
- error = send_sig_info(sig, info, p);
- }
- read_unlock(&tasklist_lock);
- return error;
-}
-
-/*
* kill_something_info() interprets pid in interesting ways just like kill(2).
*
* POSIX specifies that kill(-1,sig) is unspecified, but what we have
@@ -708,7 +678,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
} else if (pid < 0) {
return kill_pg_info(sig, info, -pid);
} else {
- return kill_tg_info(sig, info, pid);
+ return kill_proc_info(sig, info, pid);
}
}
@@ -911,25 +881,29 @@ out:
return error;
}
-asmlinkage long
-sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
+long do_sigpending(void *set, unsigned long sigsetsize)
{
- int error = -EINVAL;
+ long error = -EINVAL;
sigset_t pending;
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
+ if (sigsetsize > sizeof(sigset_t))
goto out;
spin_lock_irq(&current->sigmask_lock);
- sigandsets(&pending, &current->blocked, &current->signal);
+ sigandsets(&pending, &current->blocked, &current->pending.signal);
spin_unlock_irq(&current->sigmask_lock);
error = -EFAULT;
- if (!copy_to_user(set, &pending, sizeof(*set)))
+ if (!copy_to_user(set, &pending, sigsetsize))
error = 0;
out:
return error;
+}
+
+asmlinkage long
+sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
+{
+ return do_sigpending(set, sigsetsize);
}
asmlinkage long
@@ -967,25 +941,28 @@ sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
spin_lock_irq(&current->sigmask_lock);
sig = dequeue_signal(&these, &info);
if (!sig) {
- /* None ready -- temporarily unblock those we're interested
- in so that we'll be awakened when they arrive. */
- sigset_t oldblocked = current->blocked;
- sigandsets(&current->blocked, &current->blocked, &these);
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
-
timeout = MAX_SCHEDULE_TIMEOUT;
if (uts)
timeout = (timespec_to_jiffies(&ts)
+ (ts.tv_sec || ts.tv_nsec));
- current->state = TASK_INTERRUPTIBLE;
- timeout = schedule_timeout(timeout);
+ if (timeout) {
+ /* None ready -- temporarily unblock those we're
+ * interested while we are sleeping in so that we'll
+ * be awakened when they arrive. */
+ sigset_t oldblocked = current->blocked;
+ sigandsets(&current->blocked, &current->blocked, &these);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
- spin_lock_irq(&current->sigmask_lock);
- sig = dequeue_signal(&these, &info);
- current->blocked = oldblocked;
- recalc_sigpending(current);
+ current->state = TASK_INTERRUPTIBLE;
+ timeout = schedule_timeout(timeout);
+
+ spin_lock_irq(&current->sigmask_lock);
+ sig = dequeue_signal(&these, &info);
+ current->blocked = oldblocked;
+ recalc_sigpending(current);
+ }
}
spin_unlock_irq(&current->sigmask_lock);
@@ -1045,10 +1022,12 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
(act && (sig == SIGKILL || sig == SIGSTOP)))
return -EINVAL;
- spin_lock_irq(&current->sigmask_lock);
k = &current->sig->action[sig-1];
- if (oact) *oact = *k;
+ spin_lock(&current->sig->siglock);
+
+ if (oact)
+ *oact = *k;
if (act) {
*k = *act;
@@ -1076,33 +1055,14 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
&& (sig == SIGCONT ||
sig == SIGCHLD ||
sig == SIGWINCH))) {
- /* So dequeue any that might be pending.
- XXX: process-wide signals? */
- if (sig >= SIGRTMIN &&
- sigismember(&current->signal, sig)) {
- struct signal_queue *q, **pp;
- pp = &current->sigqueue;
- q = current->sigqueue;
- while (q) {
- if (q->info.si_signo != sig)
- pp = &q->next;
- else {
- if ((*pp = q->next) == NULL)
- current->sigqueue_tail = pp;
- kmem_cache_free(signal_queue_cachep, q);
- atomic_dec(&nr_queued_signals);
- }
- q = *pp;
- }
-
- }
- sigdelset(&current->signal, sig);
- recalc_sigpending(current);
+ spin_lock_irq(&current->sigmask_lock);
+ if (rm_sig_from_queue(sig, current))
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
}
}
- spin_unlock_irq(&current->sigmask_lock);
-
+ spin_unlock(&current->sig->siglock);
return 0;
}
@@ -1170,6 +1130,12 @@ out:
return error;
}
+asmlinkage long
+sys_sigpending(old_sigset_t *set)
+{
+ return do_sigpending(set, sizeof(*set));
+}
+
#if !defined(__alpha__)
/* Alpha has its own versions with special arguments. */
@@ -1222,22 +1188,6 @@ out:
return error;
}
-asmlinkage long
-sys_sigpending(old_sigset_t *set)
-{
- int error;
- old_sigset_t pending;
-
- spin_lock_irq(&current->sigmask_lock);
- pending = current->blocked.sig[0] & current->signal.sig[0];
- spin_unlock_irq(&current->sigmask_lock);
-
- error = -EFAULT;
- if (!copy_to_user(set, &pending, sizeof(*set)))
- error = 0;
- return error;
-}
-
#ifndef __sparc__
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
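
Much of the signal.c churn above is mechanical: the per-task fields signal, sigqueue and sigqueue_tail are folded into one struct sigpending, and helpers such as send_signal(), collect_signal() and rm_from_queue() operate on that structure. A small user-space model of the queue (the signal-set bookkeeping is omitted) showing the O(1) tail-pointer append and the pointer-to-pointer removal walk used throughout the patch:

#include <stdio.h>
#include <stdlib.h>

/* toy counterparts of struct sigqueue / struct sigpending */
struct toy_sigqueue {
        struct toy_sigqueue *next;
        int signo;
};

struct toy_sigpending {
        struct toy_sigqueue *head, **tail;   /* tail always points at the last ->next */
};

static void init_pending(struct toy_sigpending *p)
{
        p->head = NULL;
        p->tail = &p->head;                  /* like init_sigpending() */
}

static void queue_signal(struct toy_sigpending *p, int signo)
{
        struct toy_sigqueue *q = malloc(sizeof(*q));
        q->next = NULL;
        q->signo = signo;
        *p->tail = q;                        /* append in O(1), as send_signal() does */
        p->tail = &q->next;
}

static void remove_signal(struct toy_sigpending *p, int signo)
{
        struct toy_sigqueue *q, **pp = &p->head;

        while ((q = *pp) != NULL) {          /* same walk as rm_from_queue() */
                if (q->signo == signo) {
                        if ((*pp = q->next) == NULL)
                                p->tail = pp;        /* removed the last entry: fix tail */
                        free(q);
                        continue;
                }
                pp = &q->next;
        }
}

int main(void)
{
        struct toy_sigpending pending;
        struct toy_sigqueue *q;

        init_pending(&pending);
        queue_signal(&pending, 10);
        queue_signal(&pending, 12);
        queue_signal(&pending, 10);
        remove_signal(&pending, 10);         /* drops both instances */

        for (q = pending.head; q; q = q->next)
                printf("pending: %d\n", q->signo);
        return 0;
}
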
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 12a82399d..f7be8abd3 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -44,7 +44,7 @@
irq_cpustat_t irq_stat[NR_CPUS];
#endif /* CONFIG_ARCH_S390 */
-static struct softirq_action softirq_vec[32];
+static struct softirq_action softirq_vec[32] __cacheline_aligned;
asmlinkage void do_softirq()
{
@@ -140,6 +140,14 @@ static void tasklet_action(struct softirq_action *a)
clear_bit(TASKLET_STATE_SCHED, &t->state);
t->func(t->data);
+ /*
+ * talklet_trylock() uses test_and_set_bit that imply
+ * an mb when it returns zero, thus we need the explicit
+ * mb only here: while closing the critical section.
+ */
+#ifdef CONFIG_SMP
+ smp_mb__before_clear_bit();
+#endif
tasklet_unlock(t);
continue;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 8a4453104..87ee4770c 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -123,18 +123,15 @@ int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
int ret=NOTIFY_DONE;
struct notifier_block *nb = *n;
- read_lock(&notifier_lock);
while(nb)
{
ret=nb->notifier_call(nb,val,v);
if(ret&NOTIFY_STOP_MASK)
{
- read_unlock(&notifier_lock);
return ret;
}
nb=nb->next;
}
- read_unlock(&notifier_lock);
return ret;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b475af7ed..c320027fa 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -235,7 +235,7 @@ static ctl_table kern_table[] = {
static ctl_table vm_table[] = {
{VM_FREEPG, "freepages",
- &freepages, sizeof(freepages_t), 0644, NULL, &proc_dointvec},
+ &freepages, sizeof(freepages_t), 0444, NULL, &proc_dointvec},
{VM_BDFLUSH, "bdflush", &bdf_prm, 9*sizeof(int), 0644, NULL,
&proc_dointvec_minmax, &sysctl_intvec, NULL,
&bdflush_min, &bdflush_max},
@@ -283,6 +283,12 @@ static ctl_table fs_table[] = {
{FS_OVERFLOWGID, "overflowgid", &fs_overflowgid, sizeof(int), 0644, NULL,
&proc_dointvec_minmax, &sysctl_intvec, NULL,
&minolduid, &maxolduid},
+ {FS_LEASES, "leases-enable", &leases_enable, sizeof(int),
+ 0644, NULL, &proc_dointvec},
+ {FS_DIR_NOTIFY, "dir-notify-enable", &dir_notify_enable,
+ sizeof(int), 0644, NULL, &proc_dointvec},
+ {FS_LEASE_TIME, "lease-break-time", &lease_break_time, sizeof(int),
+ 0644, NULL, &proc_dointvec},
{0}
};
diff --git a/kernel/timer.c b/kernel/timer.c
index 044ba492e..0dd0a7331 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -22,7 +22,6 @@
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
-#include <linux/slab.h>
#include <asm/uaccess.h>
@@ -596,9 +595,6 @@ void update_process_times(int user_tick)
kstat.per_cpu_system[cpu] += system;
} else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
kstat.per_cpu_system[cpu] += system;
-
- if (slab_cache_drain_mask & (1UL << cpu))
- slab_drain_local_cache();
}
/*