author    Ralf Baechle <ralf@linux-mips.org>    1997-04-29 21:13:14 +0000
committer <ralf@linux-mips.org>                 1997-04-29 21:13:14 +0000
commit    19c9bba94152148523ba0f7ef7cffe3d45656b11 (patch)
tree      40b1cb534496a7f1ca0f5c314a523c69f1fee464 /include/asm-sparc/atomic.h
parent    7206675c40394c78a90e74812bbdbf8cf3cca1be (diff)
Import of Linux/MIPS 2.1.36
Diffstat (limited to 'include/asm-sparc/atomic.h')
-rw-r--r--  include/asm-sparc/atomic.h | 160
1 file changed, 90 insertions(+), 70 deletions(-)
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 3e46c262e..b74eebb46 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -1,4 +1,4 @@
-/* atomic.h: These really suck for now.
+/* atomic.h: These still suck, but the I-cache hit rate is higher.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
@@ -6,99 +6,119 @@
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
-typedef int atomic_t;
+#ifdef __SMP__
+/* This is a temporary measure. -DaveM */
+typedef struct { volatile int counter; } atomic_t;
+#else
+typedef struct { int counter; } atomic_t;
+#endif
+
+#define ATOMIC_INIT(i) { (i << 8) }
#ifdef __KERNEL__
#include <asm/system.h>
#include <asm/psr.h>
-/*
- * Make sure gcc doesn't try to be clever and move things around
+/* We do the bulk of the actual work out of line in two common
+ * routines in assembler, see arch/sparc/lib/atomic.S for the
+ * "fun" details.
+ *
+ * For SMP the trick is you embed the spin lock byte within
+ * the word, use the low byte so signedness is easily retained
+ * via a quick arithmetic shift. It looks like this:
+ *
+ * ----------------------------------------
+ * | signed 24-bit counter value | lock | atomic_t
+ * ----------------------------------------
+ * 31 8 7 0
+ */
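[Editorial note, not part of the patch: the packing arithmetic this layout implies can be sketched in plain user-space C. The shift-by-8 mirrors ATOMIC_INIT() above and the sra in atomic_read() below; the sketch assumes gcc's arithmetic right shift on signed int, which the hand-written sra does not have to assume.]

/* Hypothetical sketch of the layout above: counter in bits 31..8,
 * lock byte in bits 7..0.  Assumes two's complement and an
 * arithmetic >> on signed int, as gcc on SPARC provides. */
#include <stdio.h>

static int pack(int counter)            { return counter << 8; }
static int unpack(int word)             { return word >> 8; }
static unsigned char lockbyte(int word) { return word & 0xff; }

int main(void)
{
	int word = pack(-3);
	printf("counter=%d lock=%d\n", unpack(word), lockbyte(word));
	return 0;
}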
+
+static __inline__ int atomic_read(atomic_t *v)
+{
+ int val;
+
+ __asm__ __volatile__("sra %1, 0x8, %0"
+ : "=r" (val)
+ : "r" (v->counter));
+ return val;
+}
+#define atomic_set(v, i) (((v)->counter) = ((i) << 8))
+
+/* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
#define __atomic_fool_gcc(x) ((struct { int a[100]; } *)x)
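[Editorial note, hypothetical code not from the patch: one common variant of this idiom passes the object through the big-struct cast as a memory operand. The oversized type tells gcc the asm may read or write the whole object at exactly that address, so no register-cached alias can be substituted.]

/* Hypothetical illustration: the "=m" operand on the big struct forces
 * gcc to treat the asm as touching the object at p itself. */
static __inline__ void example_store(int val, int *p)
{
	__asm__ __volatile__("st %1, [%2]"
			     : "=m" (*__atomic_fool_gcc(p))
			     : "r" (val), "r" (p));
}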
-static __inline__ void atomic_add(atomic_t i, atomic_t *v)
+static __inline__ void atomic_add(int i, atomic_t *v)
{
+ register atomic_t *ptr asm("g1");
+ register int increment asm("g2");
+ ptr = (atomic_t *) __atomic_fool_gcc(v);
+ increment = i;
+
__asm__ __volatile__("
- rd %%psr, %%g2
- andcc %%g2, %2, %%g0
- be,a 1f
- wr %%g2, %2, %%psr
-1: ld [%0], %%g3
- add %%g3, %1, %%g3
- andcc %%g2, %2, %%g0
- st %%g3, [%0]
- be,a 1f
- wr %%g2, 0x0, %%psr
-1: nop; nop;
- "
- : : "r" (__atomic_fool_gcc(v)), "r" (i), "i" (PSR_PIL)
- : "g2", "g3");
+ mov %%o7, %%g4
+ call ___atomic_add
+ add %%o7, 8, %%o7
+" : "=&r" (increment)
+ : "0" (increment), "r" (ptr)
+ : "g3", "g4", "g7", "memory", "cc");
}
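[Editorial note: the stub above pins its arguments in %g1/%g2, saves the return address in %g4 (the call instruction overwrites %o7), and branches to common code in arch/sparc/lib/atomic.S. As a hedged sketch, and emphatically not the real assembler routine, the obligation of that out-of-line code on SMP looks roughly like the C below; __ldstub and __atomic_add_sketch are illustrative names, with ldstub (SPARC's atomic load-and-set-byte instruction) doing the locking on the embedded lock byte.]

/* Hypothetical C rendition of what an ___atomic_add-style routine
 * must do on SMP; the real code is assembler. */
static __inline__ unsigned char __ldstub(volatile unsigned char *p)
{
	unsigned char ret;
	/* ldstub atomically loads the byte and stores 0xff to it */
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (ret)
			     : "r" (p)
			     : "memory");
	return ret;
}

static int __atomic_add_sketch(int i, atomic_t *v)
{
	/* SPARC is big-endian: the low-order byte of the word is last */
	volatile unsigned char *lock =
		(volatile unsigned char *)&v->counter + 3;
	int ret;

	while (__ldstub(lock))		/* acquire: spin until byte was 0 */
		while (*lock)
			;
	ret = v->counter + (i << 8);	/* adjust bits 31..8 only */
	v->counter = ret;
	*lock = 0;			/* release */
	return ret >> 8;		/* arithmetic shift keeps the sign */
}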
-static __inline__ void atomic_sub(atomic_t i, atomic_t *v)
+static __inline__ void atomic_sub(int i, atomic_t *v)
{
+ register atomic_t *ptr asm("g1");
+ register int increment asm("g2");
+
+ ptr = (atomic_t *) __atomic_fool_gcc(v);
+ increment = i;
+
__asm__ __volatile__("
- rd %%psr, %%g2
- andcc %%g2, %2, %%g0
- be,a 1f
- wr %%g2, %2, %%psr
-1: ld [%0], %%g3
- sub %%g3, %1, %%g3
- andcc %%g2, %2, %%g0
- st %%g3, [%0]
- be,a 1f
- wr %%g2, 0x0, %%psr
-1: nop; nop;
- "
- : : "r" (__atomic_fool_gcc(v)), "r" (i), "i" (PSR_PIL)
- : "g2", "g3");
+ mov %%o7, %%g4
+ call ___atomic_sub
+ add %%o7, 8, %%o7
+" : "=&r" (increment)
+ : "0" (increment), "r" (ptr)
+ : "g3", "g4", "g7", "memory", "cc");
}
-static __inline__ int atomic_add_return(atomic_t i, atomic_t *v)
+static __inline__ int atomic_add_return(int i, atomic_t *v)
{
+ register atomic_t *ptr asm("g1");
+ register int increment asm("g2");
+
+ ptr = (atomic_t *) __atomic_fool_gcc(v);
+ increment = i;
+
__asm__ __volatile__("
- rd %%psr, %%g2
- andcc %%g2, %3, %%g0
- be,a 1f
- wr %%g2, %3, %%psr
-1: ld [%1], %%g3
- add %%g3, %2, %0
- andcc %%g2, %3, %%g0
- st %0, [%1]
- be,a 1f
- wr %%g2, 0x0, %%psr
-1: nop; nop;
- "
- : "=&r" (i)
- : "r" (__atomic_fool_gcc(v)), "0" (i), "i" (PSR_PIL)
- : "g2", "g3");
-
- return i;
+ mov %%o7, %%g4
+ call ___atomic_add
+ add %%o7, 8, %%o7
+" : "=&r" (increment)
+ : "0" (increment), "r" (ptr)
+ : "g3", "g4", "g7", "memory", "cc");
+
+ return increment;
}
-static __inline__ int atomic_sub_return(atomic_t i, atomic_t *v)
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
+ register atomic_t *ptr asm("g1");
+ register int increment asm("g2");
+
+ ptr = (atomic_t *) __atomic_fool_gcc(v);
+ increment = i;
+
__asm__ __volatile__("
- rd %%psr, %%g2
- andcc %%g2, %3, %%g0
- be,a 1f
- wr %%g2, %3, %%psr
-1: ld [%1], %%g3
- sub %%g3, %2, %0
- andcc %%g2, %3, %%g0
- st %0, [%1]
- be,a 1f
- wr %%g2, 0x0, %%psr
-1: nop; nop;
- "
- : "=&r" (i)
- : "r" (__atomic_fool_gcc(v)), "0" (i), "i" (PSR_PIL)
- : "g2", "g3");
-
- return i;
+ mov %%o7, %%g4
+ call ___atomic_sub
+ add %%o7, 8, %%o7
+" : "=&r" (increment)
+ : "0" (increment), "r" (ptr)
+ : "g3", "g4", "g7", "memory", "cc");
+
+ return increment;
}
#define atomic_dec_return(v) atomic_sub_return(1,(v))
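[Editorial note: a hypothetical usage sketch, not from the patch, showing what this interface is typically built into; free_obj() is an assumed destructor.]

extern void free_obj(void);	/* hypothetical destructor */

static atomic_t refcnt = ATOMIC_INIT(1);

static void get_obj(void)
{
	atomic_add(1, &refcnt);
}

static void put_obj(void)
{
	/* drop a reference; destroy the object on the final put */
	if (atomic_dec_return(&refcnt) == 0)
		free_obj();
}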