summaryrefslogtreecommitdiffstats
path: root/include/asm-sparc64/softirq.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/asm-sparc64/softirq.h')
-rw-r--r--  include/asm-sparc64/softirq.h | 88
1 file changed, 78 insertions, 10 deletions
diff --git a/include/asm-sparc64/softirq.h b/include/asm-sparc64/softirq.h
index 3e5d7cc72..fa32f67e5 100644
--- a/include/asm-sparc64/softirq.h
+++ b/include/asm-sparc64/softirq.h
@@ -13,18 +13,15 @@
* is entirely private to an implementation, it should not be
* referenced at all outside of this file.
*/
-extern atomic_t __sparc64_bh_counter;
#define get_active_bhs() (bh_mask & bh_active)
-#ifdef __SMP__
-#error SMP not supported on sparc64 yet
-#else
+#ifndef __SMP__
+
+extern int __sparc64_bh_counter;
-#define softirq_trylock() (atomic_read(&__sparc64_bh_counter) ? \
- 0 : \
- ((atomic_set(&__sparc64_bh_counter,1)),1))
-#define softirq_endlock() (atomic_set(&__sparc64_bh_counter, 0))
+#define softirq_trylock() (__sparc64_bh_counter ? 0 : (__sparc64_bh_counter=1))
+#define softirq_endlock() (__sparc64_bh_counter = 0)
#define clear_active_bhs(x) (bh_active &= ~(x))
#define init_bh(nr, routine) \
@@ -54,11 +51,82 @@ do { int ent = nr; \
bh_mask |= 1 << ent; \
} while(0)
+#define start_bh_atomic() do { __sparc64_bh_counter++; barrier(); } while(0)
+
+#define end_bh_atomic() do { barrier(); __sparc64_bh_counter--; } while(0)
+
+#else /* (__SMP__) */
+
+extern atomic_t __sparc64_bh_counter;
+
#define start_bh_atomic() \
do { atomic_inc(&__sparc64_bh_counter); synchronize_irq(); } while(0)
-#define end_bh_atomic() do { atomic_dec(&__sparc64_bh_counter); } while(0)
+#define end_bh_atomic() atomic_dec(&__sparc64_bh_counter)
+
+#include <asm/spinlock.h>
+
+#define init_bh(nr, routine) \
+do { unsigned long flags; \
+ int ent = nr; \
+ spin_lock_irqsave(&global_bh_lock, flags); \
+ bh_base[ent] = routine; \
+ bh_mask_count[ent] = 0; \
+ bh_mask |= 1 << ent; \
+ spin_unlock_irqrestore(&global_bh_lock, flags); \
+} while(0)
+
+#define remove_bh(nr) \
+do { unsigned long flags; \
+ int ent = nr; \
+ spin_lock_irqsave(&global_bh_lock, flags); \
+ bh_base[ent] = NULL; \
+ bh_mask &= ~(1 << ent); \
+ spin_unlock_irqrestore(&global_bh_lock, flags); \
+} while(0)
+
+#define mark_bh(nr) \
+do { unsigned long flags; \
+ spin_lock_irqsave(&global_bh_lock, flags); \
+ bh_active |= (1 << nr); \
+ spin_unlock_irqrestore(&global_bh_lock, flags); \
+} while(0)
+
+#define disable_bh(nr) \
+do { unsigned long flags; \
+ int ent = nr; \
+ spin_lock_irqsave(&global_bh_lock, flags); \
+ bh_mask &= ~(1 << ent); \
+ bh_mask_count[ent]++; \
+ spin_unlock_irqrestore(&global_bh_lock, flags); \
+} while(0)
+
+#define enable_bh(nr) \
+do { unsigned long flags; \
+ int ent = nr; \
+ spin_lock_irqsave(&global_bh_lock, flags); \
+ if (!--bh_mask_count[ent]) \
+ bh_mask |= 1 << ent; \
+ spin_unlock_irqrestore(&global_bh_lock, flags); \
+} while(0)
+
+#define softirq_trylock() \
+({ \
+ int ret = 1; \
+	if(atomic_add_return(1, &__sparc64_bh_counter) != 1) { \
+		atomic_dec(&__sparc64_bh_counter); \
+ ret = 0; \
+ } \
+ ret; \
+})
+#define softirq_endlock() atomic_dec(&__sparc64_bh_counter)
+#define clear_active_bhs(mask) \
+do { unsigned long flags; \
+ spin_lock_irqsave(&global_bh_lock, flags); \
+ bh_active &= ~(mask); \
+ spin_unlock_irqrestore(&global_bh_lock, flags); \
+} while(0)
-#endif /* !(__SMP__) */
+#endif /* (__SMP__) */
#endif /* !(__SPARC64_SOFTIRQ_H) */