summaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>1999-10-20 21:10:58 +0000
committerRalf Baechle <ralf@linux-mips.org>1999-10-20 21:10:58 +0000
commit3c230e92ef761debb676d8790010c30230415f90 (patch)
tree3184b411f0a099c3ee19a653c4a3525eebfdcbb1 /include
parentff28d8ce709b1137516ff67a7c398ccf3c93ab5e (diff)
Fix 32-bit version as well. I didn't really test these but they should
be ok as the code is almost identical with the 64-bit variant.
Diffstat (limited to 'include')
-rw-r--r--include/asm-mips/semaphore-helper.h144
-rw-r--r--include/asm-mips/semaphore.h79
2 files changed, 125 insertions, 98 deletions
diff --git a/include/asm-mips/semaphore-helper.h b/include/asm-mips/semaphore-helper.h
index 0517a1de9..824bcb49f 100644
--- a/include/asm-mips/semaphore-helper.h
+++ b/include/asm-mips/semaphore-helper.h
@@ -1,13 +1,14 @@
-/* $Id: semaphore-helper.h,v 1.3 1999/06/11 14:30:15 ralf Exp $
+/* $Id: semaphore-helper.h,v 1.5 1999/08/13 17:07:27 harald Exp $
*
* SMP- and interrupt-safe semaphores helper functions.
*
* (C) Copyright 1996 Linus Torvalds
* (C) Copyright 1999 Andrea Arcangeli
* (C) Copyright 1999 Ralf Baechle
+ * (C) Copyright 1999 Silicon Graphics, Inc.
*/
-#ifndef __ASM_MIPS_SEMAPHORE_HELPER_H
-#define __ASM_MIPS_SEMAPHORE_HELPER_H
+#ifndef _ASM_SEMAPHORE_HELPER_H
+#define _ASM_SEMAPHORE_HELPER_H
#include <linux/config.h>
@@ -85,11 +86,11 @@ waking_non_zero(struct semaphore *sem)
int ret, tmp;
__asm__ __volatile__(
- "1:\tll\t%1,%2\n\t"
- "blez\t%1,2f\n\t"
- "subu\t%0,%1,1\n\t"
- "sc\t%0,%2\n\t"
- "beqz\t%0,1b\n\t"
+ "1:\tll\t%1, %2\n\t"
+ "blez\t%1, 2f\n\t"
+ "subu\t%0, %1, 1\n\t"
+ "sc\t%0, %2\n\t"
+ "beqz\t%0, 1b\n\t"
"2:"
".text"
: "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
@@ -110,108 +111,111 @@ waking_non_zero(struct semaphore *sem)
*
* This is accomplished by doing a 64-bit ll/sc on the 2 32-bit words.
*
- * This is crazy. Normally it stricly forbidden to use 64-bit operation
+ * This is crazy. Normally it is strictly forbidden to use 64-bit operations
* in the 32-bit MIPS kernel. In this case it's however ok because if an
* interrupt has destroyed the upper half of registers sc will fail.
+ * Note also that this will not work for MIPS32 CPUs!
+ *
+ * Pseudocode:
+ *
+ * If(sem->waking > 0) {
+ * Decrement(sem->waking)
+ * Return(SUCCESS)
+ * } else If(signal_pending(tsk)) {
+ * Increment(sem->count)
+ * Return(-EINTR)
+ * } else {
+ * Return(SLEEP)
+ * }
*/
+
static inline int
waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
{
long ret, tmp;
#ifdef __MIPSEB__
+
__asm__ __volatile__("
- .set mips3
.set push
+ .set mips3
.set noat
-0: lld %1,%2
- li %0,0
-
- bltz %1, 1f
- dli $1, 0xffffffff00000000
- daddu %1, $1
+0: lld %1, %2
+ li %0, 0
+ sll $1, %1, 0
+ blez $1, 1f
+ daddiu %1, %1, -1
li %0, 1
- b 2f
+ b 2f
1:
-
- beqz %3, 1f
- addiu $1, %1, 1
- dsll32 $1, $1, 0
- dsrl32 $1, $1, 0
- dsrl32 %1, %1, 0
- dsll32 %1, %1, 0
- or %1, $1
+ beqz %3, 2f
li %0, %4
- b 2f
-1:
- scd %1, %2
+ dli $1, 0x0000000100000000
+ daddu %1, %1, $1
2:
- beqz %1,0b
- .set pop
- .set mips0"
+ scd %1, %2
+ beqz %1, 0b
+
+ .set pop"
: "=&r"(ret), "=&r"(tmp), "=m"(*sem)
: "r"(signal_pending(tsk)), "i"(-EINTR));
-#endif
-#ifdef __MIPSEL__
- __asm__ __volatile__("
+#elif defined(__MIPSEL__)
+
+ __asm__ __volatile__("
.set mips3
.set push
.set noat
-0: lld %1,%2
- li %0,0
-
- bltz %1, 1f
- dli $1, 0xffffffff00000000
- daddu %1, $1
+0:
+ lld %1, %2
+ li %0, 0
+ blez %1, 1f
+ dli $1, 0x0000000100000000
+ dsubu %1, %1, $1
li %0, 1
b 2f
1:
-
- beqz %3, 1f
- addiu $1, %1, 1
+ beqz %3, 2f
+ li %0, %4
+ /*
+ * It would be nice to assume that sem->count
+ * is != -1, but we will guard against that case
+ */
+ daddiu $1, %1, 1
+ dsll32 $1, $1, 0
+ dsrl32 $1, $1, 0
dsrl32 %1, %1, 0
dsll32 %1, %1, 0
- or %1, $1
- li %0, %4
- b 2f
-1:
- scd %1, %2
+ or %1, %1, $1
2:
- beqz %1,0b
+ scd %1, %2
+ beqz %1, 0b
+
.set pop
.set mips0"
: "=&r"(ret), "=&r"(tmp), "=m"(*sem)
: "r"(signal_pending(tsk)), "i"(-EINTR));
+
+#else
+#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
#endif
return ret;
}
/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- *
- * XXX SMP ALERT
+ * waking_non_zero_trylock is unused. we do everything in
+ * down_trylock and let non-ll/sc hosts bounce around.
*/
-#ifdef __SMP__
-#error FIXME, waking_non_zero_trylock is broken for SMP.
-#endif
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
- int ret = 1;
- if (atomic_read(&sem->waking) <= 0)
- atomic_inc(&sem->count);
- else {
- atomic_dec(&sem->waking);
- ret = 0;
- }
+static inline int
+waking_non_zero_trylock(struct semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
- return ret;
+ return 0;
}
-#endif
-
-#endif /* __ASM_MIPS_SEMAPHORE_HELPER_H */
+#endif /* _ASM_SEMAPHORE_HELPER_H */
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index 0b283f4f4..40e51b09e 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -1,4 +1,4 @@
-/* $Id: semaphore.h,v 1.9 1999/08/13 17:07:27 harald Exp $
+/* $Id: semaphore.h,v 1.10 1999/10/09 00:01:43 ralf Exp $
*
* SMP- and interrupt-safe semaphores..
*
@@ -8,6 +8,7 @@
*
* (C) Copyright 1996 Linus Torvalds
* (C) Copyright 1998, 1999 Ralf Baechle
+ * (C) Copyright 1999 Silicon Graphics, Inc.
*/
#ifndef _ASM_SEMAPHORE_H
#define _ASM_SEMAPHORE_H
@@ -109,7 +110,23 @@ extern inline int down_trylock(struct semaphore * sem)
* down_trylock returns 0 on success, 1 if we failed to get the lock.
*
* We must manipulate count and waking simultaneously and atomically.
- * Do this by using ll/sc on the pair of 32-bit words.
+ * Here, we do this by using ll/sc on the pair of 32-bit words. This
+ * won't work on MIPS32 platforms, however, and must be rewritten.
+ *
+ * Pseudocode:
+ *
+ * Decrement(sem->count)
+ * If(sem->count >=0) {
+ * Return(SUCCESS) // resource is free
+ * } else {
+ * If(sem->waking <= 0) { // if no wakeup pending
+ * Increment(sem->count) // undo decrement
+ * Return(FAILURE)
+ * } else {
+ * Decrement(sem->waking) // otherwise "steal" wakeup
+ * Return(SUCCESS)
+ * }
+ * }
*/
extern inline int down_trylock(struct semaphore * sem)
{
@@ -121,55 +138,61 @@ extern inline int down_trylock(struct semaphore * sem)
#ifdef __MIPSEB__
__asm__ __volatile__("
.set mips3
+
0: lld %1, %4
dli %3, 0x0000000100000000
- sltu %0, %1, $0
-
- bltz %1, 1f
- move %3, $0
+ dsubu %1, %3
+ li %0, 0
+ bgez %1, 2f
+ sll %2, %1, 0
+ blez %2, 1f
+ daddiu %1, %1, -1
+ b 2f
1:
-
- sltu %2, %1, $0
- and %0, %0, %2
- bnez %0, 2f
-
- subu %0, %3
+ daddu %1, %1, %3
+ li %0, 1
+ 2:
scd %1, %4
-
beqz %1, 0b
- 2:
.set mips0"
: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
: "m"(*sem)
: "memory");
-#endif
-#ifdef __MIPSEL__
+#elif defined(__MIPSEL__)
+
__asm__ __volatile__("
.set mips3
- 0: lld %1, %4
- dli %3, 0x0000000100000000
- sltu %0, %1, $0
+ 0: lld %1, %4
+ li %0, 0
+ sll %2, %1, 0
+ addiu %2, %2, -1
+ bgez %2, 2f
bltz %1, 1f
- move %3, $0
+ dsll32 %2, %2, 0
+ dsrl32 %2, %2, 0
+ dli %3, 0x0000000100000000
+ dsubu %1, %3
+ b 2f
1:
-
- sltu %2, %1, $0
- and %0, %0, %2
- bnez %0, 2f
-
- subu %0, %3
+ li %0, 1
+ b 3f
+ 2:
+ dsrl32 %1, %1, 0
+ dsll32 %1, %1, 0
+ or %1, %1, %2
+ 3:
scd %1, %4
-
beqz %1, 0b
- 2:
.set mips0"
: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
: "m"(*sem)
: "memory");
+#else
+#error "MIPS but neither __MIPSEL__ nor __MIPSEB__"
#endif
return ret;