author    Ralf Baechle <ralf@linux-mips.org>    1998-08-25 09:12:35 +0000
committer Ralf Baechle <ralf@linux-mips.org>    1998-08-25 09:12:35 +0000
commit    c7fc24dc4420057f103afe8fc64524ebc25c5d37 (patch)
tree      3682407a599b8f9f03fc096298134cafba1c9b2f /include/asm-generic
parent    1d793fade8b063fde3cf275bf1a5c2d381292cd9 (diff)
o Merge with Linux 2.1.116.
o New Newport console code.
o New G364 console code.
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/bitops.h    22
-rw-r--r--  include/asm-generic/smplock.h   49
2 files changed, 71 insertions, 0 deletions
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index 130619840..0ae108653 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -50,4 +50,26 @@ extern __inline__ int test_bit(int nr, int * addr)
 	mask = 1 << (nr & 0x1f);
 	return ((mask & *addr) != 0);
 }
+
+#ifdef __KERNEL__
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+#define ffs(x) generic_ffs(x)
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#endif /* __KERNEL__ */
+
 #endif /* _ASM_GENERIC_BITOPS_H */
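
The bitops.h hunk only maps ffs() and the hweightN() macros onto generic_ffs() and generic_hweightN(), which the kernel supplies elsewhere (in kernels of this vintage, <linux/bitops.h>). As a standalone sketch of the classic algorithms behind those generics, assuming the usual shift-and-test ffs and pairwise-summation popcount (sketch_ffs, sketch_hweight32 and the test main are illustrative names for demonstration, not kernel API):

#include <stdio.h>

/* 1-based index of the least significant set bit; 0 if no bit is set. */
static int sketch_ffs(int x)
{
	int r = 1;

	if (!x)
		return 0;
	if (!(x & 0xffff)) { x >>= 16; r += 16; }
	if (!(x & 0xff))   { x >>= 8;  r += 8;  }
	if (!(x & 0xf))    { x >>= 4;  r += 4;  }
	if (!(x & 3))      { x >>= 2;  r += 2;  }
	if (!(x & 1))      { r += 1; }
	return r;
}

/* Hamming weight by pairwise summation: adjacent 1-bit fields are
 * summed into 2-bit fields, then 2-bit into 4-bit, and so on. */
static unsigned int sketch_hweight32(unsigned int w)
{
	unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res & 0x0f0f0f0f) + ((res >> 4) & 0x0f0f0f0f);
	res = (res & 0x00ff00ff) + ((res >> 8) & 0x00ff00ff);
	return (res & 0x0000ffff) + ((res >> 16) & 0x0000ffff);
}

int main(void)
{
	/* 0x50 = bits 4 and 6 set: ffs is 5, weight is 2. */
	printf("%d %u\n", sketch_ffs(0x50), sketch_hweight32(0x50));
	return 0;
}

The pairwise-summation popcount is a fixed five rounds of masks and adds rather than a loop over all 32 bits, which is why it was the stock generic fallback.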
diff --git a/include/asm-generic/smplock.h b/include/asm-generic/smplock.h
new file mode 100644
index 000000000..e62326a10
--- /dev/null
+++ b/include/asm-generic/smplock.h
@@ -0,0 +1,49 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
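
The lock_depth counter is what makes the big kernel lock recursive per task: it starts at -1, the first lock_kernel() raises it to 0 and takes the spinlock, nested calls only bump the counter, and unlock_kernel() drops the spinlock only when the count falls back below zero. That is also why release_kernel_lock() tests lock_depth >= 0 before unlocking: a task that never took the BKL must not touch kernel_flag when the scheduler switches it out. A minimal userspace model of this counting, with a pthread mutex standing in for kernel_flag and a toy task struct standing in for current (both illustrative stand-ins, not kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kernel_flag = PTHREAD_MUTEX_INITIALIZER;

struct task { int lock_depth; };          /* -1 means "holds no BKL" */

static void lock_kernel(struct task *t)
{
	if (!++t->lock_depth)             /* -1 -> 0: first acquisition */
		pthread_mutex_lock(&kernel_flag);
}

static void unlock_kernel(struct task *t)
{
	if (--t->lock_depth < 0)          /* 0 -> -1: last release */
		pthread_mutex_unlock(&kernel_flag);
}

int main(void)
{
	struct task t = { .lock_depth = -1 };

	lock_kernel(&t);                  /* takes the mutex */
	lock_kernel(&t);                  /* nested: counter only */
	printf("depth while nested: %d\n", t.lock_depth);   /* 1 */
	unlock_kernel(&t);                /* counter only */
	unlock_kernel(&t);                /* releases the mutex */
	printf("depth after release: %d\n", t.lock_depth);  /* -1 */
	return 0;
}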