author     Ralf Baechle <ralf@linux-mips.org>  2000-03-09 15:44:17 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  2000-03-09 15:44:17 +0000
commit     1a538b9c2bb6cd97cf5e38ba15a52acafcdccc6e (patch)
tree       9a7b389ad0fcbcaafde0d848bccbea0c7ad338fc
parent     4954f6960f7890edf63b25b0e459d0f51d96119f (diff)
One more itsy bitsy tiny SMP bit ...
-rw-r--r--  include/asm-mips64/smplock.h  54
1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/include/asm-mips64/smplock.h b/include/asm-mips64/smplock.h
new file mode 100644
index 000000000..ad3bfd6ad
--- /dev/null
+++ b/include/asm-mips64/smplock.h
@@ -0,0 +1,54 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#ifndef __ASM_SMPLOCK_H
+#define __ASM_SMPLOCK_H
+
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+static __inline__ void release_kernel_lock(struct task_struct *task, int cpu)
+{
+ if (task->lock_depth >= 0)
+ spin_unlock(&kernel_flag);
+ release_irqlock(cpu);
+ __sti();
+}
+
+/*
+ * Re-acquire the kernel lock
+ */
+static __inline__ void reacquire_kernel_lock(struct task_struct *task)
+{
+ if (task->lock_depth >= 0)
+ spin_lock(&kernel_flag);
+}
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPUs.
+ */
+static __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+static __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
+
+#endif /* __ASM_SMPLOCK_H */
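
For reference: the big kernel lock is recursive per task because task_struct's
lock_depth starts at -1, so only the -1 -> 0 transition in lock_kernel()
actually takes kernel_flag, and only the 0 -> -1 transition in unlock_kernel()
drops it. A minimal sketch of that nesting behaviour follows; the helper names
outer_path()/inner_path() are hypothetical illustrations, not part of this
patch.

	#include <linux/smp_lock.h>	/* pulls in lock_kernel()/unlock_kernel() */

	static void inner_path(void)
	{
		lock_kernel();		/* lock_depth 0 -> 1: already held, spin_lock() skipped */
		/* ... work under the BKL ... */
		unlock_kernel();	/* lock_depth 1 -> 0: lock stays held */
	}

	static void outer_path(void)
	{
		lock_kernel();		/* lock_depth -1 -> 0: spin_lock(&kernel_flag) */
		inner_path();		/* nested acquisition is just a counter bump */
		unlock_kernel();	/* lock_depth 0 -> -1: spin_unlock(&kernel_flag) */
	}

release_kernel_lock() and reacquire_kernel_lock() pair up the same way: the
scheduler can drop the lock across a context switch for a task that holds it
(lock_depth >= 0) and take it back when that task is picked to run again.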