summaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/asm-mips64/semaphore-helper.h116
-rw-r--r--include/asm-mips64/semaphore.h78
2 files changed, 131 insertions, 63 deletions
diff --git a/include/asm-mips64/semaphore-helper.h b/include/asm-mips64/semaphore-helper.h
index d685b89e6..908d16c4b 100644
--- a/include/asm-mips64/semaphore-helper.h
+++ b/include/asm-mips64/semaphore-helper.h
@@ -5,6 +5,7 @@
* (C) Copyright 1996 Linus Torvalds
* (C) Copyright 1999 Andrea Arcangeli
* (C) Copyright 1999 Ralf Baechle
+ * (C) Copyright 1999 Silicon Graphics, Inc.
*/
#ifndef _ASM_SEMAPHORE_HELPER_H
#define _ASM_SEMAPHORE_HELPER_H
@@ -23,11 +24,11 @@ waking_non_zero(struct semaphore *sem)
int ret, tmp;
__asm__ __volatile__(
- "1:\tll\t%1,%2\n\t"
- "blez\t%1,2f\n\t"
- "subu\t%0,%1,1\n\t"
- "sc\t%0,%2\n\t"
- "beqz\t%0,1b\n\t"
+ "1:\tll\t%1, %2\n\t"
+ "blez\t%1, 2f\n\t"
+ "subu\t%0, %1, 1\n\t"
+ "sc\t%0, %2\n\t"
+ "beqz\t%0, 1b\n\t"
"2:"
".text"
: "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
@@ -48,78 +49,101 @@ waking_non_zero(struct semaphore *sem)
*
* This is accomplished by doing a 64-bit ll/sc on the 2 32-bit words.
*
- * This is crazy. Normally it stricly forbidden to use 64-bit operation
- * in the 32-bit MIPS kernel. In this case it's however ok because if an
- * interrupt has destroyed the upper half of registers sc will fail.
+ * Pseudocode:
+ *
+ * If(sem->waking > 0) {
+ * Decrement(sem->waking)
+ * Return(SUCCESS)
+ * } else If(signal_pending(tsk)) {
+ * Increment(sem->count)
+ * Return(-EINTR)
+ * } else {
+ * Return(SLEEP)
+ * }
*/
+
static inline int
waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
{
long ret, tmp;
#ifdef __MIPSEB__
+
__asm__ __volatile__("
- .set mips3
.set push
.set noat
-0: lld %1,%2
- li %0,0
+0: lld %1, %2
+ li %0, 0
+ sll $1, %1, 0
+ blez $1, 1f
+ daddiu %1, %1, -1
+ li %0, 1
+ b 2f
+1:
+ beqz %3, 2f
+ li %0, %4
+ dli $1, 0x0000000100000000
+ daddu %1, %1, $1
+2:
+ scd %1, %2
+ beqz %1, 0b
+ .set pop"
+ : "=&r"(ret), "=&r"(tmp), "=m"(*sem)
+ : "r"(signal_pending(tsk)), "i"(-EINTR));
+
+#elif defined(__MIPSEL__)
- bltz %1, 1f
- dli $1, 0xffffffff00000000
- daddu %1, $1
+ __asm__ __volatile__("
+ .set push
+ .set noat
+0:
+ lld %1, %2
+ li %0, 0
+ blez %1, 1f
+ dli $1, 0x0000000100000000
+ dsubu %1, %1, $1
li %0, 1
b 2f
1:
-
- beqz %3, 1f
- addiu $1, %1, 1
+ beqz %3, 2f
+ li %0, %4
+ /*
+ * It would be nice to assume that sem->count
+ * is != -1, but we will guard against that case
+ */
+ daddiu $1, %1, 1
dsll32 $1, $1, 0
dsrl32 $1, $1, 0
dsrl32 %1, %1, 0
dsll32 %1, %1, 0
- or %1, $1
- li %0, %4
- b 2f
-1:
- scd %1, %2
+ or %1, %1, $1
2:
- beqz %1,0b
- .set pop
- .set mips0"
+ scd %1, %2
+ beqz %1, 0b
+ .set pop"
: "=&r"(ret), "=&r"(tmp), "=m"(*sem)
: "r"(signal_pending(tsk)), "i"(-EINTR));
-#endif
-#ifdef __MIPSEL__
-#error "FIXME: waking_non_zero_interruptible doesn't support little endian machines yet."
+#else
+#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
#endif
return ret;
}
/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- *
- * XXX SMP ALERT
+ * waking_non_zero_trylock is unused. we do everything in
+ * down_trylock and let non-ll/sc hosts bounce around.
*/
-#ifdef __SMP__
-#error FIXME, waking_non_zero_trylock is broken for SMP.
-#endif
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret = 1;
- if (atomic_read(&sem->waking) <= 0)
- atomic_inc(&sem->count);
- else {
- atomic_dec(&sem->waking);
- ret = 0;
- }
+static inline int
+waking_non_zero_trylock(struct semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
- return ret;
+ return 0;
}
#endif /* _ASM_SEMAPHORE_HELPER_H */
diff --git a/include/asm-mips64/semaphore.h b/include/asm-mips64/semaphore.h
index 868396781..b28d9999f 100644
--- a/include/asm-mips64/semaphore.h
+++ b/include/asm-mips64/semaphore.h
@@ -1,4 +1,4 @@
-/* $Id$
+/* $Id: semaphore.h,v 1.1 1999/08/18 23:37:52 ralf Exp $
*
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -12,7 +12,7 @@
#include <asm/system.h>
#include <asm/atomic.h>
-#include <asm/spinlock.h>
+#include <linux/spinlock.h>
#include <linux/wait.h>
struct semaphore {
@@ -94,7 +94,22 @@ extern inline int down_interruptible(struct semaphore * sem)
* down_trylock returns 0 on success, 1 if we failed to get the lock.
*
* We must manipulate count and waking simultaneously and atomically.
- * Do this by using ll/sc on the pair of 32-bit words.
+ * Here, we do this by using ll/sc on the pair of 32-bit words.
+ *
+ * Pseudocode:
+ *
+ * Decrement(sem->count)
+ * If(sem->count >=0) {
+ * Return(SUCCESS) // resource is free
+ * } else {
+ * If(sem->waking <= 0) { // if no wakeup pending
+ * Increment(sem->count) // undo decrement
+ * Return(FAILURE)
+ * } else {
+ * Decrement(sem->waking) // otherwise "steal" wakeup
+ * Return(SUCCESS)
+ * }
+ * }
*/
extern inline int down_trylock(struct semaphore * sem)
{
@@ -106,32 +121,61 @@ extern inline int down_trylock(struct semaphore * sem)
#ifdef __MIPSEB__
__asm__ __volatile__("
.set mips3
+
0: lld %1, %4
dli %3, 0x0000000100000000
- sltu %0, %1, $0
-
- bltz %1, 1f
- move %3, $0
+ dsubu %1, %3
+ li %0, 0
+ bgez %1, 2f
+ sll %2, %1, 0
+ blez %2, 1f
+ daddiu %1, %1, -1
+ b 2f
1:
+ daddu %1, %1, %3
+ li %0, 1
+ 2:
+ scd %1, %4
+ beqz %1, 0b
- sltu %2, %1, $0
- and %0, %0, %2
- bnez %0, 2f
+ .set mips0"
+ : "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
+ : "m"(*sem)
+ : "memory");
- subu %0, %3
- scd %1, %4
+#elif defined(__MIPSEL__)
- beqz %1, 0b
+ __asm__ __volatile__("
+ .set mips3
+
+ 0: lld %1, %4
+ li %0, 0
+ sll %2, %1, 0
+ addiu %2, %2, -1
+ bgez %2, 2f
+ bltz %1, 1f
+ dsll32 %2, %2, 0
+ dsrl32 %2, %2, 0
+ dli %3, 0x0000000100000000
+ dsubu %1, %3
+ b 2f
+ 1:
+ li %0, 1
+ b 3f
2:
+ dsrl32 %1, %1, 0
+ dsll32 %1, %1, 0
+ or %1, %1, %2
+ 3:
+ scd %1, %4
+ beqz %1, 0b
.set mips0"
: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
: "m"(*sem)
: "memory");
-#endif
-
-#ifdef __MIPSEL__
-#error "FIXME: down_trylock doesn't support little endian machines yet."
+#else
+#error "MIPS but neither __MIPSEL__ nor __MIPSEB__"
#endif
return ret;