path: root/include/asm-i386/semaphore.h
author     Ralf Baechle <ralf@linux-mips.org>   1999-02-15 02:15:32 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   1999-02-15 02:15:32 +0000
commit     86464aed71025541805e7b1515541aee89879e33 (patch)
tree       e01a457a4912a8553bc65524aa3125d51f29f810 /include/asm-i386/semaphore.h
parent     88f99939ecc6a95a79614574cb7d95ffccfc3466 (diff)
Merge with Linux 2.2.1.
Diffstat (limited to 'include/asm-i386/semaphore.h')
-rw-r--r--   include/asm-i386/semaphore.h   95
1 file changed, 85 insertions, 10 deletions
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index 2cb1b891c..d78970da0 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -23,14 +23,49 @@
#include <asm/atomic.h>
#include <asm/spinlock.h>
+/*
+ * Semaphores are recursive: we allow the holder process
+ * to recursively do down() operations on a semaphore that
+ * the process already owns. In order to do that, we need
+ * to keep a semaphore-local copy of the owner and the
+ * "depth of ownership".
+ *
+ * NOTE! Nasty memory ordering rules:
+ * - "owner" and "owner_count" may only be modified once you hold the
+ * lock.
+ * - "owner_count" must be written _after_ modifying owner, and
+ * must be read _before_ reading owner. There must be appropriate
+ * write and read barriers to enforce this.
+ *
+ * On an x86, writes are always ordered, so the only enforcement
+ * necessary is to make sure that the owner_depth is written after
+ * the owner value in program order.
+ *
+ * For read ordering, the semaphore wake_lock spinlock already
+ * gives us the necessary guarantees.
+ *
+ * Other (saner) architectures would use "wmb()" and "rmb()" to
+ * do this in a more obvious manner.
+ */
struct semaphore {
atomic_t count;
+ unsigned long owner, owner_depth;
int waking;
struct wait_queue * wait;
};
-#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), 0, NULL })
-#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), 0, NULL })
+/*
+ * Because we want the non-contention case to be
+ * fast, we save the stack pointer into the "owner"
+ * field, and to get the true task pointer we have
+ * to do the bit masking. That moves the masking
+ * operation into the slow path.
+ */
+#define semaphore_owner(sem) \
+ ((struct task_struct *)((2*PAGE_MASK) & (sem)->owner))
+
+#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), 0, 0, 0, NULL })
+#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), 0, 1, 0, NULL })
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
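The "owner" trick above relies on the i386 convention that the task_struct sits at the bottom of the task's two-page (8KB) kernel stack, so masking the saved stack pointer recovers the task pointer. A small worked sketch of the arithmetic, assuming 4KB pages (the concrete %esp value is invented for illustration):

	/* Sketch only: with 4KB pages PAGE_MASK is 0xfffff000, so
	 * 2*PAGE_MASK == 0xffffe000 == ~(2*PAGE_SIZE - 1), i.e. it rounds an
	 * address down to the start of the 8KB task_struct/stack area.
	 */
	unsigned long esp = 0xc5013e7c;		/* hypothetical in-stack %esp */
	struct task_struct *owner;

	owner = (struct task_struct *)(esp & (2*PAGE_MASK));	/* 0xc5012000 */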
@@ -49,11 +84,6 @@ extern spinlock_t semaphore_wake_lock;
*
* This is trivially done with load_locked/store_cond,
* but on the x86 we need an external synchronizer.
- * Currently this is just the global interrupt lock,
- * bah. Go for a smaller spinlock some day.
- *
- * (On the other hand this shouldn't be in any critical
- * path, so..)
*/
static inline void wake_one_more(struct semaphore * sem)
{
@@ -64,13 +94,53 @@ static inline void wake_one_more(struct semaphore * sem)
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
}
-static inline int waking_non_zero(struct semaphore *sem)
+/*
+ * NOTE NOTE NOTE!
+ *
+ * We read owner_depth _before_ getting the semaphore. This
+ * is important, because the semaphore also acts as a memory
+ * ordering point between reading owner_depth and reading
+ * the owner.
+ *
+ * Why is this necessary? The "owner_depth" essentially protects
+ * us from using stale owner information - in the case that this
+ * process was the previous owner but somebody else is racing to
+ * acquire the semaphore, the only way we can see ourselves as an
+ * owner is with "owner_depth" of zero (so that we know to avoid
+ * the stale value).
+ *
+ * In the non-race case (where we really _are_ the owner), there
+ * is not going to be any question about what owner_depth is.
+ *
+ * In the race case, the race winner will not even get here, because
+ * it will have successfully gotten the semaphore with the locked
+ * decrement operation.
+ *
+ * Basically, we have two values, and we cannot guarantee that either
+ * is really up-to-date until we have acquired the semaphore. But we
+ * _can_ depend on an ordering between the two values, so we can use
+ * one of them to determine whether we can trust the other:
+ *
+ * Cases:
+ * - owner_depth == zero: ignore the semaphore owner, because it
+ * cannot possibly be us. Somebody else may be in the process
+ * of modifying it and the zero may be "stale", but it sure isn't
+ * going to say that "we" are the owner anyway, so who cares?
+ * - owner_depth is non-zero. That means that even if somebody
+ * else wrote the non-zero depth value, the write ordering requirement
+ * means that they will have written themselves as the owner, so
+ * if we now see ourselves as an owner we can trust it to be true.
+ */
+static inline int waking_non_zero(struct semaphore *sem, struct task_struct *tsk)
{
unsigned long flags;
+ unsigned long owner_depth = sem->owner_depth;
int ret = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0) {
+ if (sem->waking > 0 || (owner_depth && semaphore_owner(sem) == tsk)) {
+ sem->owner = (unsigned long) tsk;
+ sem->owner_depth++; /* Don't use the possibly stale value */
sem->waking--;
ret = 1;
}
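To make the tsk argument and the return value concrete, here is a hedged sketch of the kind of sleeping slow path that would call waking_non_zero(); it is a simplification for illustration only (the real __down logic lives outside this header) and assumes the 2.2-era wait-queue primitives from <linux/sched.h>:

	static void down_slow_path_sketch(struct semaphore *sem)
	{
		struct task_struct *tsk = current;
		struct wait_queue wait = { tsk, NULL };

		add_wait_queue(&sem->wait, &wait);
		for (;;) {
			tsk->state = TASK_UNINTERRUPTIBLE;
			/* Returns 1 once a wakeup has been accounted for - or
			 * right away if we already own the semaphore (the
			 * recursive case handled above). */
			if (waking_non_zero(sem, tsk))
				break;
			schedule();
		}
		tsk->state = TASK_RUNNING;
		remove_wait_queue(&sem->wait, &wait);
	}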
@@ -91,7 +161,9 @@ extern inline void down(struct semaphore * sem)
"lock ; "
#endif
"decl 0(%0)\n\t"
- "js 2f\n"
+ "js 2f\n\t"
+ "movl %%esp,4(%0)\n"
+ "movl $1,8(%0)\n\t"
"1:\n"
".section .text.lock,\"ax\"\n"
"2:\tpushl $1b\n\t"
@@ -113,6 +185,8 @@ extern inline int down_interruptible(struct semaphore * sem)
#endif
"decl 0(%1)\n\t"
"js 2f\n\t"
+ "movl %%esp,4(%1)\n\t"
+ "movl $1,8(%1)\n\t"
"xorl %0,%0\n"
"1:\n"
".section .text.lock,\"ax\"\n"
@@ -136,6 +210,7 @@ extern inline void up(struct semaphore * sem)
{
__asm__ __volatile__(
"# atomic up operation\n\t"
+ "decl 8(%0)\n\t"
#ifdef __SMP__
"lock ; "
#endif
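Taken together, the recursion bookkeeping works out as follows for a task that nests down() on a mutex it already holds; a hypothetical sequence, annotated with the count/owner_depth transitions implied by the code above:

	struct semaphore sem = MUTEX;

	down(&sem);	/* count 1 -> 0: fast path, owner = us, depth = 1       */
	down(&sem);	/* count 0 -> -1: slow path, but waking_non_zero() sees
			 * depth != 0 and owner == us, so depth -> 2 and we
			 * continue without sleeping                            */
	up(&sem);	/* depth 2 -> 1, count -1 -> 0                           */
	up(&sem);	/* depth 1 -> 0, count 0 -> 1: fully released            */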