summaryrefslogtreecommitdiffstats
path: root/arch/alpha
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-11-28 03:58:46 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-11-28 03:58:46 +0000
commitb63ad0882a16a5d28003e57f2b0b81dee3fb322b (patch)
tree0a343ce219e2b8b38a5d702d66032c57b83d9720 /arch/alpha
parenta9d7bff9a84dba79609a0002e5321b74c4d64c64 (diff)
Merge with 2.4.0-test11.
Diffstat (limited to 'arch/alpha')
-rw-r--r--arch/alpha/config.in5
-rw-r--r--arch/alpha/kernel/alpha_ksyms.c23
-rw-r--r--arch/alpha/kernel/irq.c9
-rw-r--r--arch/alpha/kernel/semaphore.c498
-rw-r--r--arch/alpha/kernel/time.c3
-rw-r--r--arch/alpha/lib/Makefile2
-rw-r--r--arch/alpha/lib/semaphore.S348
7 files changed, 364 insertions, 524 deletions
diff --git a/arch/alpha/config.in b/arch/alpha/config.in
index 18fdcde8a..da5c361c9 100644
--- a/arch/alpha/config.in
+++ b/arch/alpha/config.in
@@ -63,12 +63,13 @@ unset CONFIG_ALPHA_T2 CONFIG_ALPHA_PYXIS CONFIG_ALPHA_POLARIS
unset CONFIG_ALPHA_TSUNAMI CONFIG_ALPHA_MCPCIA
unset CONFIG_ALPHA_IRONGATE
unset CONFIG_ALPHA_BROKEN_IRQ_MASK
-unset CONFIG_ALPHA_LARGE_VMALLOC
# Most of these machines have ISA slots; not exactly sure which don't,
# and this doesn't activate hordes of code, so do it always.
define_bool CONFIG_ISA y
+define_bool CONFIG_EISA y
define_bool CONFIG_SBUS n
+define_bool CONFIG_MCA n
if [ "$CONFIG_ALPHA_JENSEN" = "y" ]
then
@@ -214,6 +215,8 @@ if [ "$CONFIG_ALPHA_GENERIC" = "y" -o "$CONFIG_ALPHA_DP264" = "y" \
-o "$CONFIG_ALPHA_WILDFIRE" = "y" -o "$CONFIG_ALPHA_TITAN" = "y" ]
then
bool 'Large VMALLOC support' CONFIG_ALPHA_LARGE_VMALLOC
+else
+ define_bool CONFIG_ALPHA_LARGE_VMALLOC n
fi
source drivers/pci/Config.in
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index 4ac2e8b0d..17285ac26 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -160,15 +160,20 @@ EXPORT_SYMBOL_NOVERS(__do_clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
-/*
- * The following are specially called from the semaphore assembly stubs.
- */
-EXPORT_SYMBOL_NOVERS(__down_failed);
-EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
-EXPORT_SYMBOL_NOVERS(__up_wakeup);
-EXPORT_SYMBOL_NOVERS(__down_read_failed);
-EXPORT_SYMBOL_NOVERS(__down_write_failed);
-EXPORT_SYMBOL_NOVERS(__rwsem_wake);
+/* Semaphore helper functions. */
+EXPORT_SYMBOL(__down_failed);
+EXPORT_SYMBOL(__down_failed_interruptible);
+EXPORT_SYMBOL(__up_wakeup);
+EXPORT_SYMBOL(down);
+EXPORT_SYMBOL(down_interruptible);
+EXPORT_SYMBOL(up);
+EXPORT_SYMBOL(__down_read_failed);
+EXPORT_SYMBOL(__down_write_failed);
+EXPORT_SYMBOL(__rwsem_wake);
+EXPORT_SYMBOL(down_read);
+EXPORT_SYMBOL(down_write);
+EXPORT_SYMBOL(up_read);
+EXPORT_SYMBOL(up_write);
/*
* SMP-specific symbols.
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index ac7ca67f8..080e48e43 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -747,7 +747,7 @@ probe_irq_mask(unsigned long val)
unsigned int mask;
mask = 0;
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < NR_IRQS; i++) {
irq_desc_t *desc = irq_desc + i;
unsigned int status;
@@ -755,8 +755,11 @@ probe_irq_mask(unsigned long val)
status = desc->status;
if (status & IRQ_AUTODETECT) {
- if (!(status & IRQ_WAITING))
- mask |= 1 << i;
+ /* We only react to ISA interrupts */
+ if (!(status & IRQ_WAITING)) {
+ if (i < 16)
+ mask |= 1 << i;
+ }
desc->status = status & ~IRQ_AUTODETECT;
desc->handler->shutdown(i);
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c
index dc5209531..7c1c9a80c 100644
--- a/arch/alpha/kernel/semaphore.c
+++ b/arch/alpha/kernel/semaphore.c
@@ -1,139 +1,267 @@
/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
+ * Alpha semaphore implementation.
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ * (C) Copyright 1999, 2000 Richard Henderson
*/
#include <linux/sched.h>
-#include <asm/semaphore-helper.h>
+
/*
* Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
+ *
+ * The "count" variable is decremented for each process that tries to sleep,
+ * while the "waking" variable is incremented when the "up()" code goes to
+ * wake up waiting processes.
*
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
+ * Notably, the inline "up()" and "down()" functions can efficiently test
+ * if they need to do any extra work (up needs to do something only if count
+ * was negative before the increment operation).
*
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
+ * waking_non_zero() (from asm/semaphore.h) must execute atomically.
*
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
+ * When __up() is called, the count was negative before incrementing it,
+ * and we need to wake up somebody.
*
- * This routine adds one to the count of processes that need to
- * wake up and exit. ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore. The others will go back
- * to sleep.
+ * This routine adds one to the count of processes that need to wake up and
+ * exit. ALL waiting processes actually wake up but only the one that gets
+ * to the "waking" field first will gate through and acquire the semaphore.
+ * The others will go back to sleep.
*
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
+ * Note that these functions are only called when there is contention on the
+ * lock, and as such all this is the "non-critical" part of the whole
+ * semaphore business. The critical part is the inline stuff in
+ * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
*/
-void
-__up(struct semaphore *sem)
-{
- wake_one_more(sem);
- wake_up(&sem->wait);
-}
-
/*
* Perform the "down" function. Return zero for semaphore acquired,
* return negative for signalled out of the function.
*
- * If called from __down, the return is ignored and the wait loop is
+ * If called from down, the return is ignored and the wait loop is
* not interruptible. This means that a task waiting on a semaphore
* using "down()" cannot be killed until someone does an "up()" on
* the semaphore.
*
- * If called from __down_interruptible, the return value gets checked
+ * If called from down_interruptible, the return value gets checked
* upon return. If the return value is negative then the task continues
* with the negative value in the return register (it can be tested by
* the caller).
*
* Either form may be used in conjunction with "up()".
- *
*/
-#define DOWN_VAR \
- struct task_struct *tsk = current; \
- wait_queue_t wait; \
- init_waitqueue_entry(&wait, tsk)
-
-#define DOWN_HEAD(task_state) \
- \
- \
- tsk->state = (task_state); \
- add_wait_queue(&sem->wait, &wait); \
- \
- /* \
- * Ok, we're set up. sem->count is known to be less than zero \
- * so we must wait. \
- * \
- * We can let go the lock for purposes of waiting. \
- * We re-acquire it after awaking so as to protect \
- * all semaphore operations. \
- * \
- * If "up()" is called before we call waking_non_zero() then \
- * we will catch it right away. If it is called later then \
- * we will have to go through a wakeup cycle to catch it. \
- * \
- * Multiple waiters contend for the semaphore lock to see \
- * who gets to gate through and who has to wait some more. \
- */ \
- for (;;) {
-
-#define DOWN_TAIL(task_state) \
- tsk->state = (task_state); \
- } \
- tsk->state = TASK_RUNNING; \
- remove_wait_queue(&sem->wait, &wait)
-
void
-__down(struct semaphore * sem)
+__down_failed(struct semaphore *sem)
{
- DOWN_VAR;
- DOWN_HEAD(TASK_UNINTERRUPTIBLE);
+ DECLARE_WAITQUEUE(wait, current);
+
+#if DEBUG_SEMAPHORE
+ printk("%s(%d): down failed(%p)\n",
+ current->comm, current->pid, sem);
+#endif
+
+ current->state = TASK_UNINTERRUPTIBLE;
+ wmb();
+ add_wait_queue_exclusive(&sem->wait, &wait);
+
+ /* At this point we know that sem->count is negative. In order
+ to avoid racing with __up, we must check for wakeup before
+ going to sleep the first time. */
+
+ while (1) {
+ long ret, tmp;
+
+ /* An atomic conditional decrement of sem->waking. */
+ __asm__ __volatile__(
+ "1: ldl_l %1,%2\n"
+ " blt %1,2f\n"
+ " subl %1,1,%0\n"
+ " stl_c %0,%2\n"
+ " beq %0,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=r"(ret), "=&r"(tmp), "=m"(sem->waking)
+ : "0"(0));
+
+ if (ret)
+ break;
- if (waking_non_zero(sem))
- break;
- schedule();
+ schedule();
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
+ }
- DOWN_TAIL(TASK_UNINTERRUPTIBLE);
+ remove_wait_queue(&sem->wait, &wait);
+ current->state = TASK_RUNNING;
+
+#if DEBUG_SEMAPHORE
+ printk("%s(%d): down acquired(%p)\n",
+ current->comm, current->pid, sem);
+#endif
}
int
-__down_interruptible(struct semaphore * sem)
+__down_failed_interruptible(struct semaphore *sem)
{
- int ret = 0;
- DOWN_VAR;
- DOWN_HEAD(TASK_INTERRUPTIBLE);
-
- ret = waking_non_zero_interruptible(sem, tsk);
- if (ret)
- {
- if (ret == 1)
- /* ret != 0 only if we get interrupted -arca */
- ret = 0;
- break;
+ DECLARE_WAITQUEUE(wait, current);
+ long ret;
+
+#if DEBUG_SEMAPHORE
+ printk("%s(%d): down failed(%p)\n",
+ current->comm, current->pid, sem);
+#endif
+
+ current->state = TASK_INTERRUPTIBLE;
+ wmb();
+ add_wait_queue_exclusive(&sem->wait, &wait);
+
+ while (1) {
+ long tmp, tmp2, tmp3;
+
+ /* We must undo the sem->count down_interruptible decrement
+	   simultaneously and atomically with the sem->waking
+ adjustment, otherwise we can race with __up. This is
+ accomplished by doing a 64-bit ll/sc on two 32-bit words.
+
+ "Equivalent" C. Note that we have to do this all without
+ (taken) branches in order to be a valid ll/sc sequence.
+
+ do {
+ tmp = ldq_l;
+ ret = 0;
+ if (tmp >= 0) { // waking >= 0
+ tmp += 0xffffffff00000000; // waking -= 1
+ ret = 1;
+ }
+ else if (pending) {
+ // count += 1, but since -1 + 1 carries into the
+ // high word, we have to be more careful here.
+ tmp = (tmp & 0xffffffff00000000)
+ | ((tmp + 1) & 0x00000000ffffffff);
+ ret = -EINTR;
+ }
+ tmp = stq_c = tmp;
+ } while (tmp == 0);
+ */
+
+ __asm__ __volatile__(
+ "1: ldq_l %1,%4\n"
+ " lda %0,0\n"
+ " cmovne %5,%6,%0\n"
+ " addq %1,1,%2\n"
+ " and %1,%7,%3\n"
+ " andnot %2,%7,%2\n"
+ " cmovge %1,1,%0\n"
+ " or %3,%2,%2\n"
+ " addq %1,%7,%3\n"
+ " cmovne %5,%2,%1\n"
+ " cmovge %2,%3,%1\n"
+ " stq_c %1,%4\n"
+ " beq %1,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(ret), "=&r"(tmp), "=&r"(tmp2),
+ "=&r"(tmp3), "=m"(*sem)
+ : "r"(signal_pending(current)), "r"(-EINTR),
+ "r"(0xffffffff00000000));
+
+ /* At this point we have ret
+ 1 got the lock
+ 0 go to sleep
+ -EINTR interrupted */
+ if (ret != 0)
+ break;
+
+ schedule();
+ set_task_state(current, TASK_INTERRUPTIBLE);
}
- schedule();
- DOWN_TAIL(TASK_INTERRUPTIBLE);
- return ret;
+ remove_wait_queue(&sem->wait, &wait);
+ current->state = TASK_RUNNING;
+ wake_up(&sem->wait);
+
+#if DEBUG_SEMAPHORE
+ printk("%s(%d): down %s(%p)\n",
+ current->comm, current->pid,
+ (ret < 0 ? "interrupted" : "acquired"), sem);
+#endif
+
+ /* Convert "got the lock" to 0==success. */
+ return (ret < 0 ? ret : 0);
+}
+
+void
+__up_wakeup(struct semaphore *sem)
+{
+ wake_up(&sem->wait);
+}
+
+void
+down(struct semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+#if DEBUG_SEMAPHORE
+ printk("%s(%d): down(%p) <count=%d> from %p\n",
+ current->comm, current->pid, sem,
+ atomic_read(&sem->count), __builtin_return_address(0));
+#endif
+ __down(sem);
+}
+
+int
+down_interruptible(struct semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+#if DEBUG_SEMAPHORE
+ printk("%s(%d): down(%p) <count=%d> from %p\n",
+ current->comm, current->pid, sem,
+ atomic_read(&sem->count), __builtin_return_address(0));
+#endif
+ return __down_interruptible(sem);
}
int
-__down_trylock(struct semaphore * sem)
+down_trylock(struct semaphore *sem)
{
- return waking_non_zero_trylock(sem);
+ int ret;
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ ret = __down_trylock(sem);
+
+#if DEBUG_SEMAPHORE
+ printk("%s(%d): down_trylock %s from %p\n",
+ current->comm, current->pid,
+ ret ? "failed" : "acquired",
+ __builtin_return_address(0));
+#endif
+
+ return ret;
+}
+
+void
+up(struct semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+#if DEBUG_SEMAPHORE
+ printk("%s(%d): up(%p) <count=%d> from %p\n",
+ current->comm, current->pid, sem,
+ atomic_read(&sem->count), __builtin_return_address(0));
+#endif
+ __up(sem);
}
@@ -142,124 +270,106 @@ __down_trylock(struct semaphore * sem)
*/
void
-__down_read(struct rw_semaphore *sem, int count)
+__down_read_failed(struct rw_semaphore *sem, int count)
{
- long tmp;
- DOWN_VAR;
+ DECLARE_WAITQUEUE(wait, current);
retry_down:
if (count < 0) {
- /* Wait for the lock to become unbiased. Readers
- are non-exclusive. */
+ /* Waiting on multiple readers and/or writers. */
- /* This takes care of granting the lock. */
- up_read(sem);
+ /* Undo the acquisition we started in down_read. */
+ atomic_inc(&sem->count);
+ current->state = TASK_UNINTERRUPTIBLE;
+ wmb();
add_wait_queue(&sem->wait, &wait);
- while (sem->count < 0) {
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (sem->count >= 0)
- break;
+ mb();
+ while (atomic_read(&sem->count) < 0) {
schedule();
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
- tsk->state = TASK_RUNNING;
-
- __asm __volatile (
- " mb\n"
- "1: ldl_l %0,%1\n"
- " subl %0,1,%2\n"
- " subl %0,1,%0\n"
- " stl_c %2,%1\n"
- " bne %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=r"(count), "=m"(sem->count), "=r"(tmp)
- : : "memory");
+ current->state = TASK_RUNNING;
+
+ mb();
+ count = atomic_dec_return(&sem->count);
if (count <= 0)
goto retry_down;
} else {
+ /* Waiting on exactly one writer. */
+
+ current->state = TASK_UNINTERRUPTIBLE;
+ wmb();
add_wait_queue(&sem->wait, &wait);
+ mb();
- while (1) {
- if (test_and_clear_bit(0, &sem->granted))
- break;
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if ((sem->granted & 1) == 0)
- schedule();
+ while (!test_and_clear_bit(0, &sem->granted)) {
+ schedule();
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
- tsk->state = TASK_RUNNING;
+ current->state = TASK_RUNNING;
}
}
void
-__down_write(struct rw_semaphore *sem, int count)
+__down_write_failed(struct rw_semaphore *sem, int count)
{
- long tmp;
- DOWN_VAR;
+ DECLARE_WAITQUEUE(wait, current);
retry_down:
if (count + RW_LOCK_BIAS < 0) {
- up_write(sem);
+ /* Waiting on multiple readers and/or writers. */
+
+ /* Undo the acquisition we started in down_write. */
+ atomic_add(RW_LOCK_BIAS, &sem->count);
+ current->state = TASK_UNINTERRUPTIBLE;
+ wmb();
add_wait_queue_exclusive(&sem->wait, &wait);
+ mb();
- while (sem->count < 0) {
- set_task_state(tsk, (TASK_UNINTERRUPTIBLE
- | TASK_EXCLUSIVE));
- if (sem->count >= RW_LOCK_BIAS)
- break;
+ while (atomic_read(&sem->count) + RW_LOCK_BIAS < 0) {
schedule();
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
- tsk->state = TASK_RUNNING;
-
- __asm __volatile (
- " mb\n"
- "1: ldl_l %0,%1\n"
- " ldah %2,%3(%0)\n"
- " ldah %0,%3(%0)\n"
- " stl_c %2,%1\n"
- " bne %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=r"(count), "=m"(sem->count), "=r"(tmp)
- : "i"(-(RW_LOCK_BIAS >> 16))
- : "memory");
+ current->state = TASK_RUNNING;
+
+ count = atomic_sub_return(RW_LOCK_BIAS, &sem->count);
if (count != 0)
goto retry_down;
} else {
- /* Put ourselves at the end of the list. */
- add_wait_queue_exclusive(&sem->write_bias_wait, &wait);
-
- while (1) {
- if (test_and_clear_bit(1, &sem->granted))
- break;
- set_task_state(tsk, (TASK_UNINTERRUPTIBLE
- | TASK_EXCLUSIVE));
- if ((sem->granted & 2) == 0)
- schedule();
+ /* Waiting on exactly one writer. */
+
+ current->state = TASK_UNINTERRUPTIBLE;
+ wmb();
+ add_wait_queue_exclusive(&sem->wait, &wait);
+ mb();
+
+ while (!test_and_clear_bit(1, &sem->granted)) {
+ schedule();
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->write_bias_wait, &wait);
- tsk->state = TASK_RUNNING;
+ current->state = TASK_RUNNING;
/* If the lock is currently unbiased, awaken the sleepers.
FIXME: This wakes up the readers early in a bit of a
stampede -> bad! */
- if (sem->count >= 0)
+ count = atomic_read(&sem->count);
+ if (__builtin_expect(count >= 0, 0))
wake_up(&sem->wait);
}
}
void
-__do_rwsem_wake(struct rw_semaphore *sem, int readers)
+__rwsem_wake(struct rw_semaphore *sem, int readers)
{
if (readers) {
if (test_and_set_bit(0, &sem->granted))
@@ -271,3 +381,67 @@ __do_rwsem_wake(struct rw_semaphore *sem, int readers)
wake_up(&sem->write_bias_wait);
}
}
+
+void
+down_read(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+ __down_read(sem);
+#if WAITQUEUE_DEBUG
+ if (sem->granted & 2)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_inc(&sem->readers);
+#endif
+}
+
+void
+down_write(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+ __down_write(sem);
+#if WAITQUEUE_DEBUG
+ if (sem->granted & 3)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ atomic_inc(&sem->writers);
+#endif
+}
+
+void
+up_read(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+ if (sem->granted & 2)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_dec(&sem->readers);
+#endif
+ __up_read(sem);
+}
+
+void
+up_write(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+ if (sem->granted & 3)
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (atomic_read(&sem->writers) != 1)
+ BUG();
+ atomic_dec(&sem->writers);
+#endif
+ __up_write(sem);
+}
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 0edf60839..bc7beb7be 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -378,6 +378,9 @@ do_settimeofday(struct timeval *tv)
* BUG: This routine does not handle hour overflow properly; it just
* sets the minutes. Usually you won't notice until after reboot!
*/
+
+extern int abs(int);
+
static int
set_rtc_mmss(unsigned long nowtime)
{
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index d22a6f522..913331a95 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -12,7 +12,7 @@ OBJS = __divqu.o __remqu.o __divlu.o __remlu.o memset.o memcpy.o io.o \
strcat.o strcpy.o strncat.o strncpy.o stxcpy.o stxncpy.o \
strchr.o strrchr.o memchr.o \
copy_user.o clear_user.o strncpy_from_user.o strlen_user.o \
- csum_ipv6_magic.o strcasecmp.o semaphore.o fpreg.o \
+ csum_ipv6_magic.o strcasecmp.o fpreg.o \
callback_srm.o srm_puts.o srm_printk.o
lib.a: $(OBJS)
diff --git a/arch/alpha/lib/semaphore.S b/arch/alpha/lib/semaphore.S
deleted file mode 100644
index 517285ea4..000000000
--- a/arch/alpha/lib/semaphore.S
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * linux/arch/alpha/lib/semaphore.S
- *
- * Copyright (C) 1999, 2000 Richard Henderson
- */
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- */
-
- .set noat
- .set noreorder
- .align 4
-
-/* __down_failed takes the semaphore in $24, clobbers $24 and $28. */
-
- .globl __down_failed
- .ent __down_failed
-__down_failed:
- ldgp $29,0($27)
- lda $30, -20*8($30)
- stq $28, 0*8($30)
- stq $0, 1*8($30)
- stq $1, 2*8($30)
- stq $2, 3*8($30)
- stq $3, 4*8($30)
- stq $4, 5*8($30)
- stq $5, 6*8($30)
- stq $6, 7*8($30)
- stq $7, 8*8($30)
- stq $16, 9*8($30)
- stq $17, 10*8($30)
- stq $18, 11*8($30)
- stq $19, 12*8($30)
- stq $20, 13*8($30)
- stq $21, 14*8($30)
- stq $22, 15*8($30)
- stq $23, 16*8($30)
- stq $25, 17*8($30)
- stq $26, 18*8($30)
- .frame $30, 20*8, $28
- .prologue 1
-
- mov $24, $16
- jsr __down
-
- ldq $28, 0*8($30)
- ldq $0, 1*8($30)
- ldq $1, 2*8($30)
- ldq $2, 3*8($30)
- ldq $3, 4*8($30)
- ldq $4, 5*8($30)
- ldq $5, 6*8($30)
- ldq $6, 7*8($30)
- ldq $7, 8*8($30)
- ldq $16, 9*8($30)
- ldq $17, 10*8($30)
- ldq $18, 11*8($30)
- ldq $19, 12*8($30)
- ldq $20, 13*8($30)
- ldq $21, 14*8($30)
- ldq $22, 15*8($30)
- ldq $23, 16*8($30)
- ldq $25, 17*8($30)
- ldq $26, 18*8($30)
- lda $30, 20*8($30)
- ret $31, ($28), 0
- .end __down_failed
-
-/* __down_failed_interruptible takes the semaphore in $24,
- clobbers $28, returns success in $24. */
-
- .globl __down_failed_interruptible
- .ent __down_failed_interruptible
-__down_failed_interruptible:
- ldgp $29,0($27)
- lda $30, -20*8($30)
- stq $28, 0*8($30)
- stq $0, 1*8($30)
- stq $1, 2*8($30)
- stq $2, 3*8($30)
- stq $3, 4*8($30)
- stq $4, 5*8($30)
- stq $5, 6*8($30)
- stq $6, 7*8($30)
- stq $7, 8*8($30)
- stq $16, 9*8($30)
- stq $17, 10*8($30)
- stq $18, 11*8($30)
- stq $19, 12*8($30)
- stq $20, 13*8($30)
- stq $21, 14*8($30)
- stq $22, 15*8($30)
- stq $23, 16*8($30)
- stq $25, 17*8($30)
- stq $26, 18*8($30)
- .frame $30, 20*8, $28
- .prologue 1
-
- mov $24, $16
- jsr __down_interruptible
- mov $0, $24
-
- ldq $28, 0*8($30)
- ldq $0, 1*8($30)
- ldq $1, 2*8($30)
- ldq $2, 3*8($30)
- ldq $3, 4*8($30)
- ldq $4, 5*8($30)
- ldq $5, 6*8($30)
- ldq $6, 7*8($30)
- ldq $7, 8*8($30)
- ldq $16, 9*8($30)
- ldq $17, 10*8($30)
- ldq $18, 11*8($30)
- ldq $19, 12*8($30)
- ldq $20, 13*8($30)
- ldq $21, 14*8($30)
- ldq $22, 15*8($30)
- ldq $23, 16*8($30)
- ldq $25, 17*8($30)
- ldq $26, 18*8($30)
- lda $30, 20*8($30)
- ret $31, ($28), 0
- .end __down_failed_interruptible
-
-/* __up_wakeup takes the semaphore in $24, clobbers $24 and $28. */
-
- .globl __up_wakeup
- .ent __up_wakeup
-__up_wakeup:
- ldgp $29,0($27)
- lda $30, -20*8($30)
- stq $28, 0*8($30)
- stq $0, 1*8($30)
- stq $1, 2*8($30)
- stq $2, 3*8($30)
- stq $3, 4*8($30)
- stq $4, 5*8($30)
- stq $5, 6*8($30)
- stq $6, 7*8($30)
- stq $7, 8*8($30)
- stq $16, 9*8($30)
- stq $17, 10*8($30)
- stq $18, 11*8($30)
- stq $19, 12*8($30)
- stq $20, 13*8($30)
- stq $21, 14*8($30)
- stq $22, 15*8($30)
- stq $23, 16*8($30)
- stq $25, 17*8($30)
- stq $26, 18*8($30)
- .frame $30, 20*8, $28
- .prologue 1
-
- mov $24, $16
- jsr __up
-
- ldq $28, 0*8($30)
- ldq $0, 1*8($30)
- ldq $1, 2*8($30)
- ldq $2, 3*8($30)
- ldq $3, 4*8($30)
- ldq $4, 5*8($30)
- ldq $5, 6*8($30)
- ldq $6, 7*8($30)
- ldq $7, 8*8($30)
- ldq $16, 9*8($30)
- ldq $17, 10*8($30)
- ldq $18, 11*8($30)
- ldq $19, 12*8($30)
- ldq $20, 13*8($30)
- ldq $21, 14*8($30)
- ldq $22, 15*8($30)
- ldq $23, 16*8($30)
- ldq $25, 17*8($30)
- ldq $26, 18*8($30)
- lda $30, 20*8($30)
- ret $31, ($28), 0
- .end __up_wakeup
-
-/* __down_read_failed takes the semaphore in $24, count in $25;
- clobbers $24, $25 and $28. */
-
- .globl __down_read_failed
- .ent __down_read_failed
-__down_read_failed:
- ldgp $29,0($27)
- lda $30, -18*8($30)
- stq $28, 0*8($30)
- stq $0, 1*8($30)
- stq $1, 2*8($30)
- stq $2, 3*8($30)
- stq $3, 4*8($30)
- stq $4, 5*8($30)
- stq $5, 6*8($30)
- stq $6, 7*8($30)
- stq $7, 8*8($30)
- stq $16, 9*8($30)
- stq $17, 10*8($30)
- stq $18, 11*8($30)
- stq $19, 12*8($30)
- stq $20, 13*8($30)
- stq $21, 14*8($30)
- stq $22, 15*8($30)
- stq $23, 16*8($30)
- stq $26, 17*8($30)
- .frame $30, 18*8, $28
- .prologue 1
-
- mov $24, $16
- mov $25, $17
- jsr __down_read
-
- ldq $28, 0*8($30)
- ldq $0, 1*8($30)
- ldq $1, 2*8($30)
- ldq $2, 3*8($30)
- ldq $3, 4*8($30)
- ldq $4, 5*8($30)
- ldq $5, 6*8($30)
- ldq $6, 7*8($30)
- ldq $7, 8*8($30)
- ldq $16, 9*8($30)
- ldq $17, 10*8($30)
- ldq $18, 11*8($30)
- ldq $19, 12*8($30)
- ldq $20, 13*8($30)
- ldq $21, 14*8($30)
- ldq $22, 15*8($30)
- ldq $23, 16*8($30)
- ldq $26, 17*8($30)
- lda $30, 18*8($30)
- ret $31, ($28), 0
- .end __down_read_failed
-
-/* __down_write_failed takes the semaphore in $24, count in $25;
- clobbers $24, $25 and $28. */
-
- .globl __down_write_failed
- .ent __down_write_failed
-__down_write_failed:
- ldgp $29,0($27)
- lda $30, -20*8($30)
- stq $28, 0*8($30)
- stq $0, 1*8($30)
- stq $1, 2*8($30)
- stq $2, 3*8($30)
- stq $3, 4*8($30)
- stq $4, 5*8($30)
- stq $5, 6*8($30)
- stq $6, 7*8($30)
- stq $7, 8*8($30)
- stq $16, 9*8($30)
- stq $17, 10*8($30)
- stq $18, 11*8($30)
- stq $19, 12*8($30)
- stq $20, 13*8($30)
- stq $21, 14*8($30)
- stq $22, 15*8($30)
- stq $23, 16*8($30)
- stq $26, 17*8($30)
- .frame $30, 18*8, $28
- .prologue 1
-
- mov $24, $16
- mov $25, $17
- jsr __down_write
-
- ldq $28, 0*8($30)
- ldq $0, 1*8($30)
- ldq $1, 2*8($30)
- ldq $2, 3*8($30)
- ldq $3, 4*8($30)
- ldq $4, 5*8($30)
- ldq $5, 6*8($30)
- ldq $6, 7*8($30)
- ldq $7, 8*8($30)
- ldq $16, 9*8($30)
- ldq $17, 10*8($30)
- ldq $18, 11*8($30)
- ldq $19, 12*8($30)
- ldq $20, 13*8($30)
- ldq $21, 14*8($30)
- ldq $22, 15*8($30)
- ldq $23, 16*8($30)
- ldq $26, 17*8($30)
- lda $30, 18*8($30)
- ret $31, ($28), 0
- .end __down_write_failed
-
-/* __rwsem_wake takes the semaphore in $24, readers in $25;
- clobbers $24, $25, and $28. */
-
- .globl __rwsem_wake
- .ent __rwsem_wake
-__rwsem_wake:
- ldgp $29,0($27)
- lda $30, -18*8($30)
- stq $28, 0*8($30)
- stq $0, 1*8($30)
- stq $1, 2*8($30)
- stq $2, 3*8($30)
- stq $3, 4*8($30)
- stq $4, 5*8($30)
- stq $5, 6*8($30)
- stq $6, 7*8($30)
- stq $7, 8*8($30)
- stq $16, 9*8($30)
- stq $17, 10*8($30)
- stq $18, 11*8($30)
- stq $19, 12*8($30)
- stq $20, 13*8($30)
- stq $21, 14*8($30)
- stq $22, 15*8($30)
- stq $23, 16*8($30)
- stq $26, 17*8($30)
- .frame $30, 18*8, $28
- .prologue 1
-
- mov $24, $16
- mov $25, $17
- jsr __do_rwsem_wake
-
- ldq $28, 0*8($30)
- ldq $0, 1*8($30)
- ldq $1, 2*8($30)
- ldq $2, 3*8($30)
- ldq $3, 4*8($30)
- ldq $4, 5*8($30)
- ldq $5, 6*8($30)
- ldq $6, 7*8($30)
- ldq $7, 8*8($30)
- ldq $16, 9*8($30)
- ldq $17, 10*8($30)
- ldq $18, 11*8($30)
- ldq $19, 12*8($30)
- ldq $20, 13*8($30)
- ldq $21, 14*8($30)
- ldq $22, 15*8($30)
- ldq $23, 16*8($30)
- ldq $26, 17*8($30)
- lda $30, 18*8($30)
- ret $31, ($28), 0
- .end __rwsem_wake