author    Ralf Baechle <ralf@linux-mips.org>  2000-10-05 01:18:40 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-10-05 01:18:40 +0000
commit    012bb3e61e5eced6c610f9e036372bf0c8def2d1 (patch)
tree      87efc733f9b164e8c85c0336f92c8fb7eff6d183 /include/asm-i386/spinlock.h
parent    625a1589d3d6464b5d90b8a0918789e3afffd220 (diff)
Merge with Linux 2.4.0-test9. Please check DECstation; I had a number of rejects to fix up while integrating Linus' patches. I also found that this kernel will only boot SMP on Origin; the UP kernel freezes soon after bootup with SCSI timeout messages. I am committing this anyway, since I found that the last CVS versions had the same problem.
Diffstat (limited to 'include/asm-i386/spinlock.h')
-rw-r--r--  include/asm-i386/spinlock.h  22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 5d5b67ab2..4a92bb2a2 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -70,12 +70,12 @@ static inline int spin_trylock(spinlock_t *lock)
char oldval;
__asm__ __volatile__(
"xchgb %b0,%1"
- :"=q" (oldval), "=m" (__dummy_lock(lock))
- :"0" (0));
+ :"=q" (oldval), "=m" (lock->lock)
+ :"0" (0) : "memory");
return oldval > 0;
}
-extern inline void spin_lock(spinlock_t *lock)
+static inline void spin_lock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
__label__ here;
@@ -87,10 +87,10 @@ printk("eip: %p\n", &&here);
#endif
__asm__ __volatile__(
spin_lock_string
- :"=m" (__dummy_lock(lock)));
+ :"=m" (lock->lock) : : "memory");
}
-extern inline void spin_unlock(spinlock_t *lock)
+static inline void spin_unlock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
if (lock->magic != SPINLOCK_MAGIC)
@@ -100,7 +100,7 @@ extern inline void spin_unlock(spinlock_t *lock)
#endif
__asm__ __volatile__(
spin_unlock_string
- :"=m" (__dummy_lock(lock)));
+ :"=m" (lock->lock) : : "memory");
}
/*
@@ -143,7 +143,7 @@ typedef struct {
*/
/* the spinlock helpers are in arch/i386/kernel/semaphore.S */
-extern inline void read_lock(rwlock_t *rw)
+static inline void read_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
if (rw->magic != RWLOCK_MAGIC)
@@ -152,7 +152,7 @@ extern inline void read_lock(rwlock_t *rw)
__build_read_lock(rw, "__read_lock_failed");
}
-extern inline void write_lock(rwlock_t *rw)
+static inline void write_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
if (rw->magic != RWLOCK_MAGIC)
@@ -161,10 +161,10 @@ extern inline void write_lock(rwlock_t *rw)
__build_write_lock(rw, "__write_lock_failed");
}
-#define read_unlock(rw) asm volatile("lock ; incl %0" :"=m" (__dummy_lock(&(rw)->lock)))
-#define write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" (__dummy_lock(&(rw)->lock)))
+#define read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
+#define write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-extern inline int write_trylock(rwlock_t *lock)
+static inline int write_trylock(rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
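For context, the primitives touched in this diff (spin_lock(), spin_unlock(), spin_trylock(), read_lock(), write_lock() and friends) are the basic i386 locking API of the 2.4 kernel. The sketch below is illustrative only and not part of the patch; it shows typical caller-side usage under a 2.4-era kernel tree, and my_lock, my_stats_lock and the counters are hypothetical names.

/*
 * Minimal usage sketch (illustrative, not from the patch): a spinlock
 * guarding a counter and an rwlock guarding read-mostly statistics,
 * using the 2.4-style static initializers.
 */
#include <linux/spinlock.h>

static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;     /* hypothetical */
static rwlock_t my_stats_lock = RW_LOCK_UNLOCKED;   /* hypothetical */
static int event_count;
static long stats_total;

static void record_event(void)
{
	spin_lock(&my_lock);            /* spins until the lock is acquired */
	event_count++;                  /* critical section */
	spin_unlock(&my_lock);          /* the "memory" clobber added above keeps
	                                 * the compiler from caching event_count
	                                 * across the unlock */
}

static void update_stats(long delta)
{
	write_lock(&my_stats_lock);     /* exclusive: excludes readers and writers */
	stats_total += delta;
	write_unlock(&my_stats_lock);
}

static long read_stats(void)
{
	long val;

	read_lock(&my_stats_lock);      /* shared: many readers may hold this at once */
	val = stats_total;
	read_unlock(&my_stats_lock);
	return val;
}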