author	Ralf Baechle <ralf@linux-mips.org>	1999-06-13 16:29:25 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	1999-06-13 16:29:25 +0000
commit	db7d4daea91e105e3859cf461d7e53b9b77454b2 (patch)
tree	9bb65b95440af09e8aca63abe56970dd3360cc57	/include/asm-mips/semaphore.h
parent	9c1c01ead627bdda9211c9abd5b758d6c687d8ac (diff)
Merge with Linux 2.2.8.
Diffstat (limited to 'include/asm-mips/semaphore.h')
-rw-r--r--	include/asm-mips/semaphore.h	75
1 file changed, 45 insertions(+), 30 deletions(-)
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index b1ac4ecce..88c726546 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -25,42 +25,13 @@ struct semaphore {
asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
+asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
extern spinlock_t semaphore_wake_lock;
#define sema_init(sem, val) atomic_set(&((sem)->count), val)
-/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * which we have. Let the rest of the losers suck eggs.
- */
-
-static inline void wake_one_more(struct semaphore * sem)
-{
- atomic_inc(&sem->waking);
-}
-
-static inline int waking_non_zero(struct semaphore *sem, struct task_struct *tsk)
-{
- int ret, tmp;
-
- __asm__ __volatile__(
- "1:\tll\t%1,%2\n"
- "blez\t%1,2f\n\t"
- "subu\t%0,%1,1\n\t"
- "sc\t%0,%2\n\t"
- "beqz\t%0,1b\n\t"
- "2:"
- ".text"
- : "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
- : "0"(0));
-
- return ret;
-}
-
extern inline void down(struct semaphore * sem)
{
if (atomic_dec_return(&sem->count) < 0)
@@ -76,6 +47,50 @@ extern inline int down_interruptible(struct semaphore * sem)
}
/*
+ * down_trylock returns 0 on success, 1 if we failed to get the lock.
+ *
+ * We must manipulate count and waking simultaneously and atomically.
+ * Do this by using ll/sc on the pair of 32-bit words.
+ */
+extern inline int down_trylock(struct semaphore * sem)
+{
+ long ret, tmp, tmp2, sub;
+
+#ifdef __MIPSEB__
+ __asm__ __volatile__("
+ .set mips3
+ 0: lld %1, %4
+ dli %3, 0x0000000100000000
+ sltu %0, %1, $0
+
+ bltz %1, 1f
+ move %3, $0
+ 1:
+
+ sltu %2, %1, $0
+ and %0, %0, %2
+ bnez %0, 2f
+
+ subu %0, %3
+ scd %1, %4
+
+ beqz %1, 0b
+ 2:
+
+ .set mips0"
+ : "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
+ : "m"(*sem)
+ : "memory");
+#endif
+
+#ifdef __MIPSEL__
+#error "FIXME: down_trylock doesn't support little endian machines yet."
+#endif
+
+ return ret;
+}
+
+/*
* Note! This is subtle. We jump to wake people up only if
* the semaphore was negative (== somebody was waiting on it).
*/
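
For context, the comment added above describes manipulating count and waking as a pair. Below is a rough, non-atomic C sketch of that decision logic, modelled on the count/waking semaphore scheme other ports of this era use; down_trylock_sketch is purely illustrative and not part of this commit. The real code performs the whole sequence in a single lld/scd pass over the two adjacent 32-bit words instead of relying on a spinlock such as semaphore_wake_lock.

static inline int down_trylock_sketch(struct semaphore *sem)
{
	int failed = 0;

	/* NOT atomic: this only shows the intended decision logic. */
	if (atomic_dec_return(&sem->count) < 0) {
		/* Contended: succeed only if a wakeup is already pending. */
		if (atomic_read(&sem->waking) > 0)
			atomic_dec(&sem->waking);	/* consume the wakeup */
		else {
			atomic_inc(&sem->count);	/* undo the decrement */
			failed = 1;
		}
	}
	return failed;	/* 0 on success, 1 if the semaphore was not taken */
}

On a 64-bit big-endian kernel the count word (the first field of the structure) sits in the upper half of the doubleword fetched by lld, which is presumably why the constant 0x0000000100000000 represents a change of 1 in count and why the little-endian case is still left as an #error. A hypothetical caller would use the primitive like this:

	if (down_trylock(&some_sem))	/* some_sem: any initialised semaphore */
		return -EAGAIN;		/* contended, and we did not sleep */
	/* ... critical section ... */
	up(&some_sem);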