Diffstat (limited to 'include/asm-i386/softirq.h')
 include/asm-i386/softirq.h | 81 ++++++++++++++++++++++---------------
 1 file changed, 46 insertions(+), 35 deletions(-)
diff --git a/include/asm-i386/softirq.h b/include/asm-i386/softirq.h
index 07a678435..008edf305 100644
--- a/include/asm-i386/softirq.h
+++ b/include/asm-i386/softirq.h
@@ -4,6 +4,8 @@
 #include <asm/atomic.h>
 #include <asm/hardirq.h>
 
+extern unsigned int local_bh_count[NR_CPUS];
+
 #define get_active_bhs()	(bh_mask & bh_active)
 #define clear_active_bhs(x)	atomic_clear_mask((x),&bh_active)
 
@@ -25,22 +27,6 @@ extern inline void mark_bh(int nr)
 	set_bit(nr, &bh_active);
 }
 
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
-	bh_mask &= ~(1 << nr);
-	bh_mask_count[nr]++;
-}
-
-extern inline void enable_bh(int nr)
-{
-	if (!--bh_mask_count[nr])
-		bh_mask |= 1 << nr;
-}
-
 #ifdef __SMP__
 
 /*
@@ -48,52 +34,77 @@ extern inline void enable_bh(int nr)
  * is entirely private to an implementation, it should not be
  * referenced at all outside of this file.
  */
-extern atomic_t __intel_bh_counter;
+extern atomic_t global_bh_lock;
+extern atomic_t global_bh_count;
 
-extern inline void start_bh_atomic(void)
+extern void synchronize_bh(void);
+
+static inline void start_bh_atomic(void)
 {
-	atomic_inc(&__intel_bh_counter);
-	synchronize_irq();
+	atomic_inc(&global_bh_lock);
+	synchronize_bh();
 }
 
-extern inline void end_bh_atomic(void)
+static inline void end_bh_atomic(void)
 {
-	atomic_dec(&__intel_bh_counter);
+	atomic_dec(&global_bh_lock);
 }
 
 /* These are for the irq's testing the lock */
-static inline int softirq_trylock(void)
+static inline int softirq_trylock(int cpu)
 {
-	atomic_inc(&__intel_bh_counter);
-	if (atomic_read(&__intel_bh_counter) != 1) {
-		atomic_dec(&__intel_bh_counter);
-		return 0;
+	if (!test_and_set_bit(0,&global_bh_count)) {
+		if (atomic_read(&global_bh_lock) == 0) {
+			++local_bh_count[cpu];
+			return 1;
+		}
+		clear_bit(0,&global_bh_count);
 	}
-	return 1;
+	return 0;
 }
 
-#define softirq_endlock()	atomic_dec(&__intel_bh_counter)
+static inline void softirq_endlock(int cpu)
+{
+	local_bh_count[cpu]--;
+	clear_bit(0,&global_bh_count);
+}
 
 #else
 
-extern int __intel_bh_counter;
-
 extern inline void start_bh_atomic(void)
 {
-	__intel_bh_counter++;
+	local_bh_count[smp_processor_id()]++;
 	barrier();
 }
 
 extern inline void end_bh_atomic(void)
 {
 	barrier();
-	__intel_bh_counter--;
+	local_bh_count[smp_processor_id()]--;
 }
 
 /* These are for the irq's testing the lock */
-#define softirq_trylock()	(__intel_bh_counter ? 0 : (__intel_bh_counter=1))
-#define softirq_endlock()	(__intel_bh_counter = 0)
+#define softirq_trylock(cpu)	(local_bh_count[cpu] ? 0 : (local_bh_count[cpu]=1))
+#define softirq_endlock(cpu)	(local_bh_count[cpu] = 0)
+#define synchronize_bh()	do { } while (0)
 
 #endif	/* SMP */
 
+/*
+ * These use a mask count to correctly handle
+ * nested disable/enable calls
+ */
+extern inline void disable_bh(int nr)
+{
+	bh_mask &= ~(1 << nr);
+	bh_mask_count[nr]++;
+	synchronize_bh();
+}
+
+extern inline void enable_bh(int nr)
+{
+	if (!--bh_mask_count[nr])
+		bh_mask |= 1 << nr;
+}
+
 #endif	/* __ASM_SOFTIRQ_H */
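
Note (not part of the patch): the sketch below models the two protocols this header implements, the mask-count nesting of disable_bh()/enable_bh() and the SMP softirq_trylock() handshake, as a standalone userspace C program. C11 atomics stand in for the kernel's atomic_t and bitops, NR_CPUS and NR_BHS are arbitrary illustrative values, and synchronize_bh() (which the patch only declares extern) is omitted; names mirror the header, but none of this is kernel code.

/* Userspace model of the bh locking protocols above (illustrative only). */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS	4	/* hypothetical */
#define NR_BHS	32	/* hypothetical */

static unsigned long bh_mask = ~0UL;
static int bh_mask_count[NR_BHS];

static atomic_int  global_bh_lock;	/* raised by start_bh_atomic() */
static atomic_flag global_bh_count = ATOMIC_FLAG_INIT;	/* bit 0: some CPU is running bhs */
static unsigned int local_bh_count[NR_CPUS];

/* Mask-count nesting: the mask bit comes back only when every
 * disable_bh() has been matched by an enable_bh(). */
static void disable_bh(int nr)
{
	bh_mask &= ~(1UL << nr);
	bh_mask_count[nr]++;
}

static void enable_bh(int nr)
{
	if (!--bh_mask_count[nr])
		bh_mask |= 1UL << nr;
}

/* Trylock handshake: grab the global "running bhs" bit first, then
 * back off if some CPU holds global_bh_lock via start_bh_atomic(). */
static int softirq_trylock(int cpu)
{
	if (!atomic_flag_test_and_set(&global_bh_count)) {
		if (atomic_load(&global_bh_lock) == 0) {
			++local_bh_count[cpu];
			return 1;
		}
		atomic_flag_clear(&global_bh_count);
	}
	return 0;
}

static void softirq_endlock(int cpu)
{
	local_bh_count[cpu]--;
	atomic_flag_clear(&global_bh_count);
}

int main(void)
{
	disable_bh(2);
	disable_bh(2);				/* nested disable */
	enable_bh(2);
	printf("bh 2 after one enable:   %lu\n", (bh_mask >> 2) & 1);	/* 0 */
	enable_bh(2);
	printf("bh 2 after both enables: %lu\n", (bh_mask >> 2) & 1);	/* 1 */

	if (softirq_trylock(0)) {
		printf("cpu1 trylock while cpu0 runs bhs: %d\n",
		       softirq_trylock(1));	/* 0 */
		softirq_endlock(0);
	}
	return 0;
}

The intended invariant is visible in the ordering: softirq_trylock() sets the global bit before checking global_bh_lock, so a concurrent start_bh_atomic() either finds the bit already set (and synchronize_bh() waits for it to clear) or has already raised the lock count (and the trylock backs off); either way, bottom halves never run inside an atomic section.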