path: root/include/asm-mips/semaphore.h
author	Ralf Baechle <ralf@linux-mips.org>	1997-04-29 21:13:14 +0000
committer	<ralf@linux-mips.org>	1997-04-29 21:13:14 +0000
commit	19c9bba94152148523ba0f7ef7cffe3d45656b11 (patch)
tree	40b1cb534496a7f1ca0f5c314a523c69f1fee464	/include/asm-mips/semaphore.h
parent	7206675c40394c78a90e74812bbdbf8cf3cca1be (diff)
Import of Linux/MIPS 2.1.36
Diffstat (limited to 'include/asm-mips/semaphore.h')
-rw-r--r--	include/asm-mips/semaphore.h | 52
1 files changed, 45 insertions, 7 deletions
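
The substantive part of the change below is the new interruptible semaphore interface: __down_interruptible()/down_interruptible() let a sleeping task be woken early by a signal, and waking_non_zero() uses a MIPS ll/sc (load-linked/store-conditional) sequence to atomically claim a wakeup from sem->waking. As a rough sketch of how a caller would use the new primitive -- not part of this commit, and with a hypothetical my_device structure and my_device_read() function -- consider:

/*
 * Illustrative sketch only (hypothetical driver).  It shows the intended
 * calling convention: down_interruptible() returns non-zero when the sleep
 * was broken by a signal, and the caller must not assume it holds the
 * semaphore in that case.
 */
#include <asm/semaphore.h>
#include <linux/errno.h>

struct my_device {
	struct semaphore lock;		/* e.g. initialised to MUTEX */
};

static int my_device_read(struct my_device *dev)
{
	if (down_interruptible(&dev->lock))
		return -EINTR;		/* signal arrived; lock was not taken */

	/* ... critical section protected by dev->lock ... */

	up(&dev->lock);
	return 0;
}
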
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index f093fe519..6d527848b 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -14,23 +14,61 @@
 struct semaphore {
 	atomic_t count;
-	atomic_t waiting;
+	atomic_t waking;
 	struct wait_queue * wait;
 };
 
-#define MUTEX ((struct semaphore) { 1, 0, NULL })
-#define MUTEX_LOCKED ((struct semaphore) { 0, 0, NULL })
+#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), ATOMIC_INIT(0), NULL })
+#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), ATOMIC_INIT(0), NULL })
 
 extern void __down(struct semaphore * sem);
+extern int __down_interruptible(struct semaphore * sem);
 extern void __up(struct semaphore * sem);
 
+#define sema_init(sem, val) atomic_set(&((sem)->count), val)
+
+/*
+ * These two _must_ execute atomically wrt each other.
+ *
+ * This is trivially done with load_locked/store_cond,
+ * which we have. Let the rest of the losers suck eggs.
+ */
+
+static inline void wake_one_more(struct semaphore * sem)
+{
+	atomic_inc(&sem->waking);
+}
+
+static inline int waking_non_zero(struct semaphore *sem)
+{
+	int ret, tmp;
+
+	__asm__ __volatile__(
+	"1:\tll\t%1,%2\n"
+	"blez\t%1,2f\n\t"
+	"subu\t%0,%1,1\n\t"
+	"sc\t%0,%2\n\t"
+	"beqz\t%0,1b\n\t"
+	"2:"
+	".text"
+	: "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
+	: "0"(0));
+
+	return ret;
+}
+
 extern inline void down(struct semaphore * sem)
 {
-	while (1) {
-		if (atomic_dec_return(&sem->count) >= 0)
-			break;
+	if (atomic_dec_return(&sem->count) < 0)
 		__down(sem);
-	}
+}
+
+extern inline int down_interruptible(struct semaphore * sem)
+{
+	int ret = 0;
+	if (atomic_dec_return(&sem->count) < 0)
+		ret = __down_interruptible(sem);
+	return ret;
 }
 
 /*