author		Ralf Baechle <ralf@linux-mips.org>	2000-08-28 22:00:09 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	2000-08-28 22:00:09 +0000
commit		1a1d77dd589de5a567fa95e36aa6999c704ceca4 (patch)
tree		141e31f89f18b9fe0831f31852e0435ceaccafc5 /include/asm-arm/proc-armo
parent		fb9c690a18b3d66925a65b17441c37fa14d4370b (diff)
Merge with 2.4.0-test7.
Diffstat (limited to 'include/asm-arm/proc-armo')
-rw-r--r--	include/asm-arm/proc-armo/assembler.h	26
-rw-r--r--	include/asm-arm/proc-armo/cache.h	3
-rw-r--r--	include/asm-arm/proc-armo/locks.h	158
-rw-r--r--	include/asm-arm/proc-armo/system.h	18
4 files changed, 201 insertions(+), 4 deletions(-)
diff --git a/include/asm-arm/proc-armo/assembler.h b/include/asm-arm/proc-armo/assembler.h
index d611ea4dd..42b23aa89 100644
--- a/include/asm-arm/proc-armo/assembler.h
+++ b/include/asm-arm/proc-armo/assembler.h
@@ -6,10 +6,6 @@
* This file contains arm architecture specific defines
* for the different processors
*/
-#ifndef __ASSEMBLY__
-#error "Only include this from assembly code"
-#endif
-
#define MODE_USR USR26_MODE
#define MODE_FIQ FIQ26_MODE
#define MODE_IRQ IRQ26_MODE
@@ -60,3 +56,25 @@
#define SVCMODE(tmpreg)\
teqp pc, $0x00000003;\
mov r0, r0
+
+
+/*
+ * Save the current IRQ state and disable IRQs
+ * Note that this macro assumes FIQs are enabled, and
+ * that the processor is in SVC mode.
+ */
+ .macro save_and_disable_irqs, oldcpsr, temp
+ mov \oldcpsr, pc
+ orr \temp, \oldcpsr, #0x08000000
+ teqp \temp, #0
+ .endm
+
+/*
+ * Restore interrupt state previously stored in
+ * a register
+ * ** Actually do nothing on Arc - hope that the caller uses a MOVS PC soon
+ * after!
+ */
+ .macro restore_irqs, oldcpsr
+ @ This be restore_irqs
+ .endm
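On 26-bit ARM the processor status lives in the spare bits of r15: N, Z, C and V in bits 31-28, the IRQ-disable flag I in bit 27, the FIQ-disable flag F in bit 26, and the mode bits in bits 1-0. A "mov rX, pc" therefore reads the current flags, and teqp writes them back, which is what the save_and_disable_irqs macro above relies on. As an illustrative sketch only (not part of this patch, names hypothetical), the same operation expressed as GCC inline assembly from C would look roughly like:

/* Sketch only: C-level equivalent of the save_and_disable_irqs macro above.
 * Assumes a 26-bit ARM target where bit 27 of r15 is the I (IRQ-disable) flag.
 */
static inline unsigned long sketch_save_and_disable_irqs(void)
{
	unsigned long oldpsr, temp;
	__asm__ __volatile__(
"	mov	%0, pc			@ read the PSR bits held in r15\n"
"	orr	%1, %0, #0x08000000	@ set I (bit 27) to mask IRQs\n"
"	teqp	%1, #0			@ write the new flags back\n"
	: "=r" (oldpsr), "=r" (temp)
	:
	: "memory");
	return oldpsr;	/* restore is left to a flags-restoring return */
}

The restore_irqs macro is deliberately empty: on these machines the saved flags come back when the caller returns with a flags-restoring movs pc, lr, as the comment above notes.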
diff --git a/include/asm-arm/proc-armo/cache.h b/include/asm-arm/proc-armo/cache.h
index 75250fb3b..ca99bcf82 100644
--- a/include/asm-arm/proc-armo/cache.h
+++ b/include/asm-arm/proc-armo/cache.h
@@ -10,6 +10,9 @@
#define flush_icache_page(vma,page) do { } while (0)
#define flush_icache_range(start,end) do { } while (0)
+/* DAG: ARM3 will flush cache on MEMC updates anyway? so don't bother */
+#define clean_cache_area(_start,_size) do { } while (0)
+
/*
* TLB flushing:
*
diff --git a/include/asm-arm/proc-armo/locks.h b/include/asm-arm/proc-armo/locks.h
new file mode 100644
index 000000000..fcf0cab01
--- /dev/null
+++ b/include/asm-arm/proc-armo/locks.h
@@ -0,0 +1,158 @@
+/*
+ * linux/include/asm-arm/proc-armo/locks.h
+ *
+ * Copyright (C) 2000 Russell King
+ * Fixes for 26 bit machines, (C) 2000 Dave Gilbert
+ *
+ * Interrupt safe locking assembler.
+ *
+ */
+#ifndef __ASM_PROC_LOCKS_H
+#define __ASM_PROC_LOCKS_H
+
+/* Decrements by 1, fails if value < 0 */
+#define __down_op(ptr,fail) \
+ ({ \
+ __asm__ __volatile__ ( \
+ "@ atomic down operation\n" \
+" mov r0, pc\n" \
+" orr lr, r0, #0x08000000\n" \
+" teqp lr, #0\n" \
+" ldr lr, [%0]\n" \
+" and r0, r0, #0x0c000003\n" \
+" subs lr, lr, #1\n" \
+" str lr, [%0]\n" \
+" orrmi r0, r0, #0x80000000 @ set N\n" \
+" teqp r0, #0\n" \
+" movmi r0, %0\n" \
+" blmi " SYMBOL_NAME_STR(fail) \
+ : \
+ : "r" (ptr) \
+ : "r0", "lr", "cc"); \
+ })
+
+#define __down_op_ret(ptr,fail) \
+ ({ \
+ unsigned int result; \
+ __asm__ __volatile__ ( \
+" @ down_op_ret\n" \
+" mov r0, pc\n" \
+" orr lr, r0, #0x08000000\n" \
+" teqp lr, #0\n" \
+" ldr lr, [%1]\n" \
+" and r0, r0, #0x0c000003\n" \
+" subs lr, lr, #1\n" \
+" str lr, [%1]\n" \
+" orrmi r0, r0, #0x80000000 @ set N\n" \
+" teqp r0, #0\n" \
+" movmi r0, %1\n" \
+" movpl r0, #0\n" \
+" blmi " SYMBOL_NAME_STR(fail) "\n" \
+" mov %0, r0" \
+ : "=&r" (result) \
+ : "r" (ptr) \
+ : "r0", "lr", "cc"); \
+ result; \
+ })
+
+#define __up_op(ptr,wake) \
+ ({ \
+ __asm__ __volatile__ ( \
+ "@ up_op\n" \
+" mov r0, pc\n" \
+" orr lr, r0, #0x08000000\n" \
+" teqp lr, #0\n" \
+" ldr lr, [%0]\n" \
+" and r0, r0, #0x0c000003\n" \
+" adds lr, lr, #1\n" \
+" str lr, [%0]\n" \
+" orrle r0, r0, #0x80000000 @ set N - should this be mi ??? DAG ! \n" \
+" teqp r0, #0\n" \
+" movmi r0, %0\n" \
+" blmi " SYMBOL_NAME_STR(wake) \
+ : \
+ : "r" (ptr) \
+ : "r0", "lr", "cc"); \
+ })
+
+/*
+ * The value 0x01000000 supports up to 128 processors and
+ * lots of processes. BIAS must be chosen such that sub'ing
+ * BIAS once per CPU will result in the long remaining
+ * negative.
+ */
+#define RW_LOCK_BIAS 0x01000000
+#define RW_LOCK_BIAS_STR "0x01000000"
+
+/* Decrements by RW_LOCK_BIAS rather than 1, fails if value != 0 */
+#define __down_op_write(ptr,fail) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ down_op_write\n" \
+" mov r0, pc\n" \
+" orr lr, r0, #0x08000000\n" \
+" teqp lr, #0\n" \
+" and r0, r0, #0x0c000003\n" \
+\
+" ldr lr, [%0]\n" \
+" subs lr, lr, %1\n" \
+" str lr, [%0]\n" \
+\
+" orreq r0, r0, #0x40000000 @ set Z \n"\
+" teqp r0, #0\n" \
+" movne r0, %0\n" \
+" blne " SYMBOL_NAME_STR(fail) \
+ : \
+ : "r" (ptr), "I" (RW_LOCK_BIAS) \
+ : "r0", "lr", "cc"); \
+ })
+
+/* Increments by RW_LOCK_BIAS, wakes if value >= 0 */
+#define __up_op_write(ptr,wake) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ up_op_read\n" \
+" mov r0, pc\n" \
+" orr lr, r0, #0x08000000\n" \
+" teqp lr, #0\n" \
+\
+" ldr lr, [%0]\n" \
+" and r0, r0, #0x0c000003\n" \
+" adds lr, lr, %1\n" \
+" str lr, [%0]\n" \
+\
+" orrcs r0, r0, #0x20000000 @ set C\n" \
+" teqp r0, #0\n" \
+" movcs r0, %0\n" \
+" blcs " SYMBOL_NAME_STR(wake) \
+ : \
+ : "r" (ptr), "I" (RW_LOCK_BIAS) \
+ : "r0", "lr", "cc"); \
+ })
+
+#define __down_op_read(ptr,fail) \
+ __down_op(ptr, fail)
+
+#define __up_op_read(ptr,wake) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ up_op_read\n" \
+" mov r0, pc\n" \
+" orr lr, r0, #0x08000000\n" \
+" teqp lr, #0\n" \
+\
+" ldr lr, [%0]\n" \
+" and r0, r0, #0x0c000003\n" \
+" adds lr, lr, %1\n" \
+" str lr, [%0]\n" \
+\
+" orreq r0, r0, #0x40000000 @ Set Z \n" \
+" teqp r0, #0\n" \
+" moveq r0, %0\n" \
+" bleq " SYMBOL_NAME_STR(wake) \
+ : \
+ : "r" (ptr), "I" (1) \
+ : "r0", "lr", "cc"); \
+ })
+
+#endif
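The primitives in locks.h follow the usual semaphore and rw-semaphore convention: __down_op decrements the count with IRQs masked and branches to the supplied failure handler when the result goes negative, __up_op increments and calls the wakeup handler when sleepers may exist, and the write variants move the count by RW_LOCK_BIAS instead of 1. A rough C model of that intent (illustration only, with hypothetical names; the real code must do the update atomically under masked IRQs, as the assembler above does):

/* Illustrative model of the counting convention used above. */
#define SKETCH_RW_LOCK_BIAS	0x01000000

static inline void sketch_down_op(volatile long *count,
				  void (*fail)(volatile long *))
{
	if (--(*count) < 0)		/* went negative: must wait */
		fail(count);
}

static inline void sketch_down_op_write(volatile long *count,
					void (*fail)(volatile long *))
{
	*count -= SKETCH_RW_LOCK_BIAS;	/* writer claims the whole bias */
	if (*count != 0)		/* readers or a writer were active */
		fail(count);
}

Assuming the count starts at RW_LOCK_BIAS (the usual rw-lock convention), each reader costs 1 and a writer costs the full bias, so subtracting the bias leaves exactly zero only on an otherwise idle lock; that is why 0x01000000 comfortably covers 128 CPUs and many processes, as the comment explains.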
diff --git a/include/asm-arm/proc-armo/system.h b/include/asm-arm/proc-armo/system.h
index 36a3515e7..42a6bc70a 100644
--- a/include/asm-arm/proc-armo/system.h
+++ b/include/asm-arm/proc-armo/system.h
@@ -77,6 +77,24 @@ extern __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int
: "memory"); \
} while(0)
+#define __clf() do { \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+" mov %0, pc @ clf\n" \
+" orr %0, %0, #0x04000000\n" \
+" teqp %0, #0\n" \
+ : "=r" (temp)); \
+ } while(0)
+
+#define __stf() do { \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+" mov %0, pc @ stf\n" \
+" bic %0, %0, #0x04000000\n" \
+" teqp %0, #0\n" \
+ : "=r" (temp)); \
+ } while(0)
+
/*
* save current IRQ & FIQ state
*/
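The __clf and __stf macros added to system.h follow the cli/sti naming: __clf sets the F bit (bit 26 of r15) and so masks FIQs, while __stf clears it and enables them again. Neither one saves the previous state, so callers that may already run with FIQs masked would use the flag-saving helpers that the comment above introduces. A minimal usage sketch (illustration only; do_fiq_sensitive_work is a hypothetical placeholder):

/* Sketch only: mask FIQs around a short critical section.
 * do_fiq_sensitive_work() stands in for code that must not race a FIQ.
 */
extern void do_fiq_sensitive_work(void);

static inline void sketch_fiq_critical(void)
{
	__clf();			/* set F (bit 26): FIQs masked */
	do_fiq_sensitive_work();
	__stf();			/* clear F: FIQs enabled again */
}

Note that __stf unconditionally re-enables FIQs; if the prior state matters, the surrounding code should save and restore the F bit rather than use this pair directly.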