summary | refs | log | tree | commit | diff | stats
path: root/include/asm-sparc64/spinlock.h
diff options
context:
space:
mode:
author: Ralf Baechle <ralf@linux-mips.org> 1997-09-12 01:29:55 +0000
committer: Ralf Baechle <ralf@linux-mips.org> 1997-09-12 01:29:55 +0000
commit545f435ebcfd94a1e7c20b46efe81b4d6ac4e698 (patch)
treee9ce4bc598d06374bda906f18365984bf22a526a /include/asm-sparc64/spinlock.h
parent4291a610eef89d0d5c69d9a10ee6560e1aa36c74 (diff)
Merge with Linux 2.1.55. More bugfixes and goodies from my private
CVS archive.
Diffstat (limited to 'include/asm-sparc64/spinlock.h')
-rw-r--r--  include/asm-sparc64/spinlock.h  187
1 file changed, 87 insertions(+), 100 deletions(-)
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index cf2e51c71..65880b033 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -68,33 +68,35 @@ typedef struct { } rwlock_t;
typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED 0
-#define spin_lock_init(lock) (*(lock) = 0)
-#define spin_unlock_wait(lock) do { barrier(); } while(*(volatile spinlock_t *)lock)
+
+#define spin_lock_init(lock) (*((unsigned char *)(lock)) = 0)
+
+#define spin_unlock_wait(lock) \
+do { membar("#LoadLoad"); \
+} while(*((volatile unsigned char *)lock))
extern __inline__ void spin_lock(spinlock_t *lock)
{
__asm__ __volatile__("
-1: ldstub [%0], %%g2
- brz,pt %%g2, 2f
- membar #LoadLoad | #LoadStore
- b,a %%xcc, 3f
-2:
- .text 2
-3: ldub [%0], %%g2
-4: brnz,a,pt %%g2, 4b
- ldub [%0], %%g2
- b,a 1b
+1: ldstub [%0], %%g7
+ brnz,pn %%g7, 2f
+ membar #StoreLoad | #StoreStore
+ .subsection 2
+2: ldub [%0], %%g7
+ brnz,pt %%g7, 2b
+ membar #LoadLoad
+ b,a,pt %%xcc, 1b
.previous
" : /* no outputs */
: "r" (lock)
- : "g2", "memory");
+ : "g7", "memory");
}
extern __inline__ int spin_trylock(spinlock_t *lock)
{
unsigned int result;
__asm__ __volatile__("ldstub [%1], %0\n\t"
- "membar #LoadLoad | #LoadStore"
+ "membar #StoreLoad | #StoreStore"
: "=r" (result)
: "r" (lock)
: "memory");
@@ -104,7 +106,7 @@ extern __inline__ int spin_trylock(spinlock_t *lock)
extern __inline__ void spin_unlock(spinlock_t *lock)
{
__asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
- "stb %%g0, [%0]"
+ "stb %%g0, [%0]\n\t"
: /* No outputs */
: "r" (lock)
: "memory");
@@ -114,20 +116,18 @@ extern __inline__ void spin_lock_irq(spinlock_t *lock)
{
__asm__ __volatile__("
wrpr %%g0, 15, %%pil
-1: ldstub [%0], %%g2
- brz,pt %%g2, 2f
- membar #LoadLoad | #LoadStore
- b,a 3f
-2:
- .text 2
-3: ldub [%0], %%g2
-4: brnz,a,pt %%g2, 4b
- ldub [%0], %%g2
- b,a 1b
+1: ldstub [%0], %%g7
+ brnz,pn %%g7, 2f
+ membar #StoreLoad | #StoreStore
+ .subsection 2
+2: ldub [%0], %%g7
+ brnz,pt %%g7, 2b
+ membar #LoadLoad
+ b,a,pt %%xcc, 1b
.previous
" : /* no outputs */
: "r" (lock)
- : "g2", "memory");
+ : "g7", "memory");
}
extern __inline__ void spin_unlock_irq(spinlock_t *lock)
@@ -147,20 +147,18 @@ do { register spinlock_t *lp asm("g1"); \
__asm__ __volatile__( \
"\n rdpr %%pil, %0\n" \
" wrpr %%g0, 15, %%pil\n" \
- "1: ldstub [%1], %%g2\n" \
- " brz,pt %%g2, 2f\n" \
- " membar #LoadLoad | #LoadStore\n" \
- " b,a 3f\n" \
- "2:\n" \
- " .text 2\n" \
- "3: ldub [%1], %%g2\n" \
- "4: brnz,a,pt %%g2, 4b\n" \
- " ldub [%1], %%g2\n" \
- " b,a 1b\n" \
+ "1: ldstub [%1], %%g7\n" \
+ " brnz,pn %%g7, 2f\n" \
+ " membar #StoreLoad | #StoreStore\n" \
+ " .subsection 2\n" \
+ "2: ldub [%1], %%g7\n" \
+ " brnz,pt %%g7, 2b\n" \
+ " membar #LoadLoad\n" \
+ " b,a,pt %%xcc, 1b\n" \
" .previous\n" \
: "=&r" (flags) \
: "r" (lp) \
- : "g2", "memory"); \
+ : "g7", "memory"); \
} while(0)
extern __inline__ void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
@@ -182,94 +180,83 @@ typedef unsigned long rwlock_t;
extern __inline__ void read_lock(rwlock_t *rw)
{
__asm__ __volatile__("
- ldx [%0], %%g2
-1: brgez,pt %%g2, 4f
- add %%g2, 1, %%g3
- b,a 2f
-4: casx [%0], %%g2, %%g3
- cmp %%g2, %%g3
- bne,a,pn %%xcc, 1b
- ldx [%0], %%g2
- membar #LoadLoad | #LoadStore
- .text 2
-2: ldx [%0], %%g2
-3: brlz,a,pt %%g2, 3b
- ldx [%0], %%g2
- b 4b
- add %%g2, 1, %%g3
+1: ldx [%0], %%g5
+ brlz,pn %%g5, 2f
+4: add %%g5, 1, %%g7
+ casx [%0], %%g5, %%g7
+ cmp %%g5, %%g7
+ bne,pn %%xcc, 1b
+ membar #StoreLoad | #StoreStore
+ .subsection 2
+2: ldx [%0], %%g5
+ brlz,pt %%g5, 2b
+ membar #LoadLoad
+ b,a,pt %%xcc, 4b
.previous
" : /* no outputs */
: "r" (rw)
- : "g2", "g3", "cc", "memory");
+ : "g5", "g7", "cc", "memory");
}
extern __inline__ void read_unlock(rwlock_t *rw)
{
__asm__ __volatile__("
- membar #StoreStore | #LoadStore
- ldx [%0], %%g2
-1: sub %%g2, 1, %%g3
- casx [%0], %%g2, %%g3
- cmp %%g2, %%g3
- bne,a,pn %%xcc, 1b
- ldx [%0], %%g2
+1: ldx [%0], %%g5
+ sub %%g5, 1, %%g7
+ casx [%0], %%g5, %%g7
+ cmp %%g5, %%g7
+ bne,pn %%xcc, 1b
+ membar #StoreLoad | #StoreStore
" : /* no outputs */
: "r" (rw)
- : "g2", "g3", "cc", "memory");
+ : "g5", "g7", "cc", "memory");
}
extern __inline__ void write_lock(rwlock_t *rw)
{
__asm__ __volatile__("
- sethi %%uhi(0x8000000000000000), %%g5
- ldx [%0], %%g2
- sllx %%g5, 32, %%g5
-1: brgez,pt %%g2, 4f
- or %%g2, %%g5, %%g3
- b,a 5f
-4: casx [%0], %%g2, %%g3
- cmp %%g2, %%g3
- bne,a,pn %%xcc, 1b
- ldx [%0], %%g2
- andncc %%g3, %%g5, %%g0
- be,pt %%xcc, 2f
- membar #LoadLoad | #LoadStore
- b,a 7f
-2:
- .text 2
-7: ldx [%0], %%g2
-3: andn %%g2, %%g5, %%g3
- casx [%0], %%g2, %%g3
- cmp %%g2, %%g3
- bne,a,pn %%xcc, 3b
- ldx [%0], %%g2
- membar #LoadLoad | #LoadStore
-5: ldx [%0], %%g2
-6: brlz,a,pt %%g2, 6b
- ldx [%0], %%g2
- b 4b
- or %%g2, %%g5, %%g3
+ sethi %%uhi(0x8000000000000000), %%g3
+ sllx %%g3, 32, %%g3
+1: ldx [%0], %%g5
+ brlz,pn %%g5, 5f
+4: or %%g5, %%g3, %%g7
+ casx [%0], %%g5, %%g7
+ cmp %%g5, %%g7
+ bne,pn %%xcc, 1b
+ andncc %%g7, %%g3, %%g0
+ bne,pn %%xcc, 7f
+ membar #StoreLoad | #StoreStore
+ .subsection 2
+7: ldx [%0], %%g5
+ andn %%g5, %%g3, %%g7
+ casx [%0], %%g5, %%g7
+ cmp %%g5, %%g7
+ bne,pn %%xcc, 7b
+ membar #StoreLoad | #StoreStore
+5: ldx [%0], %%g5
+ brnz,pt %%g5, 5b
+ membar #LoadLoad
+ b,a,pt %%xcc, 4b
.previous
" : /* no outputs */
: "r" (rw)
- : "g2", "g3", "g5", "memory", "cc");
+ : "g3", "g5", "g7", "memory", "cc");
}
extern __inline__ void write_unlock(rwlock_t *rw)
{
__asm__ __volatile__("
- membar #StoreStore | #LoadStore
- sethi %%uhi(0x8000000000000000), %%g5
- ldx [%0], %%g2
- sllx %%g5, 32, %%g5
-1: andn %%g2, %%g5, %%g3
- casx [%0], %%g2, %%g3
- cmp %%g2, %%g3
- bne,a,pn %%xcc, 1b
- ldx [%0], %%g2
+ sethi %%uhi(0x8000000000000000), %%g3
+ sllx %%g3, 32, %%g3
+1: ldx [%0], %%g5
+ andn %%g5, %%g3, %%g7
+ casx [%0], %%g5, %%g7
+ cmp %%g5, %%g7
+ bne,pn %%xcc, 1b
+ membar #StoreLoad | #StoreStore
" : /* no outputs */
: "r" (rw)
- : "g2", "g3", "g5", "memory", "cc");
+ : "g3", "g5", "g7", "memory", "cc");
}
#define read_lock_irq(lock) do { __cli(); read_lock(lock); } while (0)