From db7d4daea91e105e3859cf461d7e53b9b77454b2 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Sun, 13 Jun 1999 16:29:25 +0000
Subject: Merge with Linux 2.2.8.
---
 include/asm-sparc/softirq.h | 40 ++++++++++++++++++++++------------------
 1 file changed, 22 insertions(+), 18 deletions(-)

(limited to 'include/asm-sparc/softirq.h')

diff --git a/include/asm-sparc/softirq.h b/include/asm-sparc/softirq.h
index 80c5d2e3e..4920aa865 100644
--- a/include/asm-sparc/softirq.h
+++ b/include/asm-sparc/softirq.h
@@ -1,21 +1,23 @@
 /* softirq.h: 32-bit Sparc soft IRQ support.
  *
  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998 Anton Blanchard (anton@progsoc.uts.edu.au)
+ * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
  */
 
 #ifndef __SPARC_SOFTIRQ_H
 #define __SPARC_SOFTIRQ_H
 
+#include <linux/tasks.h>	/* For NR_CPUS */
+
 #include <asm/atomic.h>
 #include <asm/hardirq.h>
 #include <asm/smp.h>
 
-extern unsigned int local_bh_count[NR_CPUS];
 
 #define get_active_bhs()	(bh_mask & bh_active)
 
 #ifdef __SMP__
+extern unsigned int local_bh_count[NR_CPUS];
 
 /*
  *	The locking mechanism for base handlers, to prevent re-entrancy,
@@ -23,7 +25,7 @@ extern unsigned int local_bh_count[NR_CPUS];
  *	referenced at all outside of this file.
  */
 extern atomic_t global_bh_lock;
-extern atomic_t global_bh_count;
+extern spinlock_t global_bh_count;
 extern spinlock_t sparc_bh_lock;
 
 extern void synchronize_bh(void);
@@ -41,7 +43,7 @@ extern inline void init_bh(int nr, void (*routine)(void))
 	unsigned long flags;
 	spin_lock_irqsave(&sparc_bh_lock, flags);
 	bh_base[nr] = routine;
-	bh_mask_count[nr] = 0;
+	atomic_set(&bh_mask_count[nr], 0);
 	bh_mask |= 1 << nr;
 	spin_unlock_irqrestore(&sparc_bh_lock, flags);
 }
@@ -50,8 +52,8 @@ extern inline void remove_bh(int nr)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&sparc_bh_lock, flags);
-	bh_base[nr] = NULL;
 	bh_mask &= ~(1 << nr);
+	bh_base[nr] = NULL;
 	spin_unlock_irqrestore(&sparc_bh_lock, flags);
 }
 
@@ -72,7 +74,7 @@ extern inline void disable_bh(int nr)
 	unsigned long flags;
 	spin_lock_irqsave(&sparc_bh_lock, flags);
 	bh_mask &= ~(1 << nr);
-	bh_mask_count[nr]++;
+	atomic_inc(&bh_mask_count[nr]);
 	spin_unlock_irqrestore(&sparc_bh_lock, flags);
 	synchronize_bh();
 }
@@ -81,7 +83,7 @@ extern inline void enable_bh(int nr)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&sparc_bh_lock, flags);
-	if (!--bh_mask_count[nr])
+	if (atomic_dec_and_test(&bh_mask_count[nr]))
 		bh_mask |= 1 << nr;
 	spin_unlock_irqrestore(&sparc_bh_lock, flags);
 }
@@ -100,30 +102,31 @@ static inline void end_bh_atomic(void)
 /* These are for the IRQs testing the lock */
 static inline int softirq_trylock(int cpu)
 {
-	if (atomic_add_return(1, &global_bh_count) == 1) {
+	if (spin_trylock(&global_bh_count)) {
 		if (atomic_read(&global_bh_lock) == 0) {
 			++local_bh_count[cpu];
 			return 1;
 		}
+		spin_unlock(&global_bh_count);
 	}
-	atomic_dec(&global_bh_count);
 	return 0;
 }
 
 static inline void softirq_endlock(int cpu)
 {
 	local_bh_count[cpu]--;
-	atomic_dec(&global_bh_count);
+	spin_unlock(&global_bh_count);
 }
 
 #else
 
+extern unsigned int local_bh_count;
 #define clear_active_bhs(x)	(bh_active &= ~(x))
 #define mark_bh(nr)		(bh_active |= (1 << (nr)))
 
 /* These are for the irq's testing the lock */
-#define softirq_trylock(cpu)	(local_bh_count[cpu] ? 0 : (local_bh_count[cpu]=1))
-#define softirq_endlock(cpu)	(local_bh_count[cpu] = 0)
+#define softirq_trylock(cpu)	(local_bh_count ? 0 : (local_bh_count=1))
+#define softirq_endlock(cpu)	(local_bh_count = 0)
 #define synchronize_bh()	barrier()
 
 /*
@@ -133,39 +136,40 @@ static inline void softirq_endlock(int cpu)
 extern inline void disable_bh(int nr)
 {
 	bh_mask &= ~(1 << nr);
-	bh_mask_count[nr]++;
+	atomic_inc(&bh_mask_count[nr]);
 	synchronize_bh();
 }
 
 extern inline void enable_bh(int nr)
 {
-	if (!--bh_mask_count[nr])
+	if (atomic_dec_and_test(&bh_mask_count[nr]))
 		bh_mask |= 1 << nr;
 }
 
 extern inline void init_bh(int nr, void (*routine)(void))
 {
 	bh_base[nr] = routine;
-	bh_mask_count[nr] = 0;
+	atomic_set(&bh_mask_count[nr], 0);
 	bh_mask |= 1 << nr;
 }
 
 extern inline void remove_bh(int nr)
 {
-	bh_base[nr] = NULL;
 	bh_mask &= ~(1 << nr);
+	mb();
+	bh_base[nr] = NULL;
 }
 
 extern inline void start_bh_atomic(void)
 {
-	local_bh_count[0]++;
+	local_bh_count++;
 	barrier();
 }
 
 extern inline void end_bh_atomic(void)
 {
 	barrier();
-	local_bh_count[0]--;
+	local_bh_count--;
 }
 
 #endif /* SMP */
--
cgit v1.2.3
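
Note on the SMP locking change above: softirq_trylock()/softirq_endlock() move from an atomic counter (atomic_add_return()/atomic_dec() on global_bh_count) to a spinlock, so a failed attempt now simply releases the lock instead of undoing an increment. Below is a minimal userspace sketch of that try-lock pattern, not kernel code: pthread spinlocks and C11 atomics stand in for the kernel's spinlock_t and atomic_t, and the *_model names, NR_CPUS_MODEL, and main() are illustrative assumptions rather than anything from the patch.

/* bh_trylock_model.c - userspace model of the patched SMP softirq_trylock().
 * Build: cc -pthread bh_trylock_model.c -o bh_trylock_model
 */
#include <pthread.h>
#include <stdatomic.h>

#define NR_CPUS_MODEL 4                         /* stand-in for the kernel's NR_CPUS */

static pthread_spinlock_t global_bh_count;      /* previously an atomic counter, now a lock */
static atomic_int global_bh_lock;               /* nonzero while a bh-atomic section is held */
static unsigned int local_bh_count[NR_CPUS_MODEL];

/* Grab the lock, then check that nobody is inside a bh-atomic section;
 * if somebody is, drop the lock again (the added spin_unlock() in the patch). */
static int softirq_trylock_model(int cpu)
{
	if (pthread_spin_trylock(&global_bh_count) == 0) {
		if (atomic_load(&global_bh_lock) == 0) {
			++local_bh_count[cpu];
			return 1;
		}
		pthread_spin_unlock(&global_bh_count);
	}
	return 0;
}

static void softirq_endlock_model(int cpu)
{
	local_bh_count[cpu]--;
	pthread_spin_unlock(&global_bh_count);
}

int main(void)
{
	pthread_spin_init(&global_bh_count, PTHREAD_PROCESS_PRIVATE);
	if (softirq_trylock_model(0))	/* succeeds here: no contention, no bh-atomic holder */
		softirq_endlock_model(0);
	return 0;
}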